| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
|  | 2 | *  linux/mm/swapfile.c | 
|  | 3 | * | 
|  | 4 | *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds | 
|  | 5 | *  Swap reorganised 29.12.95, Stephen Tweedie | 
|  | 6 | */ | 
|  | 7 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8 | #include <linux/mm.h> | 
|  | 9 | #include <linux/hugetlb.h> | 
|  | 10 | #include <linux/mman.h> | 
|  | 11 | #include <linux/slab.h> | 
|  | 12 | #include <linux/kernel_stat.h> | 
|  | 13 | #include <linux/swap.h> | 
|  | 14 | #include <linux/vmalloc.h> | 
|  | 15 | #include <linux/pagemap.h> | 
|  | 16 | #include <linux/namei.h> | 
|  | 17 | #include <linux/shm.h> | 
|  | 18 | #include <linux/blkdev.h> | 
| Hugh Dickins | 20137a4 | 2009-01-06 14:39:54 -0800 | [diff] [blame] | 19 | #include <linux/random.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 20 | #include <linux/writeback.h> | 
|  | 21 | #include <linux/proc_fs.h> | 
|  | 22 | #include <linux/seq_file.h> | 
|  | 23 | #include <linux/init.h> | 
|  | 24 | #include <linux/module.h> | 
|  | 25 | #include <linux/rmap.h> | 
|  | 26 | #include <linux/security.h> | 
|  | 27 | #include <linux/backing-dev.h> | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 28 | #include <linux/mutex.h> | 
| Randy.Dunlap | c59ede7 | 2006-01-11 12:17:46 -0800 | [diff] [blame] | 29 | #include <linux/capability.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 30 | #include <linux/syscalls.h> | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 31 | #include <linux/memcontrol.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 32 |  | 
|  | 33 | #include <asm/pgtable.h> | 
|  | 34 | #include <asm/tlbflush.h> | 
|  | 35 | #include <linux/swapops.h> | 
| KAMEZAWA Hiroyuki | 27a7faa | 2009-01-07 18:07:58 -0800 | [diff] [blame] | 36 | #include <linux/page_cgroup.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 37 |  | 
| Adrian Bunk | 7c363b8 | 2008-07-25 19:46:24 -0700 | [diff] [blame] | 38 | static DEFINE_SPINLOCK(swap_lock); | 
|  | 39 | static unsigned int nr_swapfiles; | 
| Hugh Dickins | b962716 | 2009-01-06 14:39:41 -0800 | [diff] [blame] | 40 | long nr_swap_pages; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 41 | long total_swap_pages; | 
|  | 42 | static int swap_overflow; | 
| Hugh Dickins | 78ecba0 | 2008-07-23 21:28:23 -0700 | [diff] [blame] | 43 | static int least_priority; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 44 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 45 | static const char Bad_file[] = "Bad swap file entry "; | 
|  | 46 | static const char Unused_file[] = "Unused swap file entry "; | 
|  | 47 | static const char Bad_offset[] = "Bad swap offset entry "; | 
|  | 48 | static const char Unused_offset[] = "Unused swap offset entry "; | 
|  | 49 |  | 
| Adrian Bunk | 7c363b8 | 2008-07-25 19:46:24 -0700 | [diff] [blame] | 50 | static struct swap_list_t swap_list = {-1, -1}; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 51 |  | 
| Rafael J. Wysocki | f577eb3 | 2006-03-23 02:59:59 -0800 | [diff] [blame] | 52 | static struct swap_info_struct swap_info[MAX_SWAPFILES]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 53 |  | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 54 | static DEFINE_MUTEX(swapon_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 55 |  | 
|  | 56 | /* | 
|  | 57 | * We need this because the bdev->unplug_fn can sleep and we cannot | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 58 | * hold swap_lock while calling the unplug_fn. And swap_lock | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 59 | * cannot be turned into a mutex. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 60 | */ | 
|  | 61 | static DECLARE_RWSEM(swap_unplug_sem); | 
|  | 62 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 63 | void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page) | 
|  | 64 | { | 
|  | 65 | swp_entry_t entry; | 
|  | 66 |  | 
|  | 67 | down_read(&swap_unplug_sem); | 
| Hugh Dickins | 4c21e2f | 2005-10-29 18:16:40 -0700 | [diff] [blame] | 68 | entry.val = page_private(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 69 | if (PageSwapCache(page)) { | 
|  | 70 | struct block_device *bdev = swap_info[swp_type(entry)].bdev; | 
|  | 71 | struct backing_dev_info *bdi; | 
|  | 72 |  | 
|  | 73 | /* | 
|  | 74 | * If the page is removed from swapcache from under us (with a | 
|  | 75 | * racy try_to_unuse/swapoff) we need an additional reference | 
| Hugh Dickins | 4c21e2f | 2005-10-29 18:16:40 -0700 | [diff] [blame] | 76 | * count to avoid reading garbage from page_private(page) above. | 
|  | 77 | * If the WARN_ON triggers during a swapoff it may be the race | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 78 | * condition and it's harmless. However if it triggers without | 
|  | 79 | * swapoff it signals a problem. | 
|  | 80 | */ | 
|  | 81 | WARN_ON(page_count(page) <= 1); | 
|  | 82 |  | 
|  | 83 | bdi = bdev->bd_inode->i_mapping->backing_dev_info; | 
| McMullan, Jason | ba32311 | 2005-05-16 21:53:40 -0700 | [diff] [blame] | 84 | blk_run_backing_dev(bdi, page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 85 | } | 
|  | 86 | up_read(&swap_unplug_sem); | 
|  | 87 | } | 
|  | 88 |  | 
| Hugh Dickins | 6a6ba83 | 2009-01-06 14:39:51 -0800 | [diff] [blame] | 89 | /* | 
|  | 90 | * swapon tells the device that all the old swap contents can be discarded, | 
|  | 91 | * to allow the swap device to optimize its wear-levelling. | 
|  | 92 | */ | 
|  | 93 | static int discard_swap(struct swap_info_struct *si) | 
|  | 94 | { | 
|  | 95 | struct swap_extent *se; | 
|  | 96 | int err = 0; | 
|  | 97 |  | 
|  | 98 | list_for_each_entry(se, &si->extent_list, list) { | 
|  | 99 | sector_t start_block = se->start_block << (PAGE_SHIFT - 9); | 
| Hugh Dickins | 858a2990 | 2009-01-06 14:39:56 -0800 | [diff] [blame] | 100 | sector_t nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); | 
| Hugh Dickins | 6a6ba83 | 2009-01-06 14:39:51 -0800 | [diff] [blame] | 101 |  | 
|  | 102 | if (se->start_page == 0) { | 
|  | 103 | /* Do not discard the swap header page! */ | 
|  | 104 | start_block += 1 << (PAGE_SHIFT - 9); | 
|  | 105 | nr_blocks -= 1 << (PAGE_SHIFT - 9); | 
|  | 106 | if (!nr_blocks) | 
|  | 107 | continue; | 
|  | 108 | } | 
|  | 109 |  | 
|  | 110 | err = blkdev_issue_discard(si->bdev, start_block, | 
|  | 111 | nr_blocks, GFP_KERNEL); | 
|  | 112 | if (err) | 
|  | 113 | break; | 
|  | 114 |  | 
|  | 115 | cond_resched(); | 
|  | 116 | } | 
|  | 117 | return err;		/* That will often be -EOPNOTSUPP */ | 
|  | 118 | } | 
|  | 119 |  | 
| Hugh Dickins | 7992fde | 2009-01-06 14:39:53 -0800 | [diff] [blame] | 120 | /* | 
|  | 121 | * swap allocation tells the device that a cluster of swap can now be discarded, | 
|  | 122 | * to allow the swap device to optimize its wear-levelling. | 
|  | 123 | */ | 
|  | 124 | static void discard_swap_cluster(struct swap_info_struct *si, | 
|  | 125 | pgoff_t start_page, pgoff_t nr_pages) | 
|  | 126 | { | 
|  | 127 | struct swap_extent *se = si->curr_swap_extent; | 
|  | 128 | int found_extent = 0; | 
|  | 129 |  | 
|  | 130 | while (nr_pages) { | 
|  | 131 | struct list_head *lh; | 
|  | 132 |  | 
|  | 133 | if (se->start_page <= start_page && | 
|  | 134 | start_page < se->start_page + se->nr_pages) { | 
|  | 135 | pgoff_t offset = start_page - se->start_page; | 
|  | 136 | sector_t start_block = se->start_block + offset; | 
| Hugh Dickins | 858a2990 | 2009-01-06 14:39:56 -0800 | [diff] [blame] | 137 | sector_t nr_blocks = se->nr_pages - offset; | 
| Hugh Dickins | 7992fde | 2009-01-06 14:39:53 -0800 | [diff] [blame] | 138 |  | 
|  | 139 | if (nr_blocks > nr_pages) | 
|  | 140 | nr_blocks = nr_pages; | 
|  | 141 | start_page += nr_blocks; | 
|  | 142 | nr_pages -= nr_blocks; | 
|  | 143 |  | 
|  | 144 | if (!found_extent++) | 
|  | 145 | si->curr_swap_extent = se; | 
|  | 146 |  | 
|  | 147 | start_block <<= PAGE_SHIFT - 9; | 
|  | 148 | nr_blocks <<= PAGE_SHIFT - 9; | 
|  | 149 | if (blkdev_issue_discard(si->bdev, start_block, | 
|  | 150 | nr_blocks, GFP_NOIO)) | 
|  | 151 | break; | 
|  | 152 | } | 
|  | 153 |  | 
|  | 154 | lh = se->list.next; | 
|  | 155 | if (lh == &si->extent_list) | 
|  | 156 | lh = lh->next; | 
|  | 157 | se = list_entry(lh, struct swap_extent, list); | 
|  | 158 | } | 
|  | 159 | } | 
|  | 160 |  | 
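|  |  | /* | 
|  |  | * wait_on_bit action routine: sleep until woken, then let wait_on_bit | 
|  |  | * recheck whether SWP_DISCARDING has been cleared. | 
|  |  | */ | 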
|  | 161 | static int wait_for_discard(void *word) | 
|  | 162 | { | 
|  | 163 | schedule(); | 
|  | 164 | return 0; | 
|  | 165 | } | 
|  | 166 |  | 
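|  |  | /* | 
|  |  | * Allocate up to SWAPFILE_CLUSTER pages sequentially before hunting for | 
|  |  | * a fresh cluster; offer to reschedule every LATENCY_LIMIT entries scanned. | 
|  |  | */ | 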
| Hugh Dickins | 048c27f | 2005-09-03 15:54:40 -0700 | [diff] [blame] | 167 | #define SWAPFILE_CLUSTER	256 | 
|  | 168 | #define LATENCY_LIMIT		256 | 
|  | 169 |  | 
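|  |  | /* | 
|  |  | * Find a free swap offset in *si, preferring to continue the current | 
|  |  | * cluster.  Called with swap_lock held; may drop and retake it while | 
|  |  | * scanning.  Returns the offset, or 0 if nothing is available. | 
|  |  | */ | 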
| Hugh Dickins | 6eb396d | 2005-09-03 15:54:35 -0700 | [diff] [blame] | 170 | static inline unsigned long scan_swap_map(struct swap_info_struct *si) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 171 | { | 
| Hugh Dickins | ebebbbe | 2009-01-06 14:39:50 -0800 | [diff] [blame] | 172 | unsigned long offset; | 
| Hugh Dickins | c60aa17 | 2009-01-06 14:39:55 -0800 | [diff] [blame] | 173 | unsigned long scan_base; | 
| Hugh Dickins | 7992fde | 2009-01-06 14:39:53 -0800 | [diff] [blame] | 174 | unsigned long last_in_cluster = 0; | 
| Hugh Dickins | 048c27f | 2005-09-03 15:54:40 -0700 | [diff] [blame] | 175 | int latency_ration = LATENCY_LIMIT; | 
| Hugh Dickins | 7992fde | 2009-01-06 14:39:53 -0800 | [diff] [blame] | 176 | int found_free_cluster = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 177 |  | 
| Hugh Dickins | 886bb7e | 2009-01-06 14:39:48 -0800 | [diff] [blame] | 178 | /* | 
| Hugh Dickins | 7dfad41 | 2005-09-03 15:54:38 -0700 | [diff] [blame] | 179 | * We try to cluster swap pages by allocating them sequentially | 
|  | 180 | * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this | 
|  | 181 | * way, however, we resort to first-free allocation, starting | 
|  | 182 | * a new cluster.  This prevents us from scattering swap pages | 
|  | 183 | * all over the entire swap partition, so that we reduce | 
|  | 184 | * overall disk seek times between swap pages.  -- sct | 
|  | 185 | * But we do now try to find an empty cluster.  -Andrea | 
| Hugh Dickins | c60aa17 | 2009-01-06 14:39:55 -0800 | [diff] [blame] | 186 | * And we let swap pages go all over an SSD partition.  Hugh | 
| Hugh Dickins | 7dfad41 | 2005-09-03 15:54:38 -0700 | [diff] [blame] | 187 | */ | 
|  | 188 |  | 
| Hugh Dickins | 52b7efdb | 2005-09-03 15:54:39 -0700 | [diff] [blame] | 189 | si->flags += SWP_SCANNING; | 
| Hugh Dickins | c60aa17 | 2009-01-06 14:39:55 -0800 | [diff] [blame] | 190 | scan_base = offset = si->cluster_next; | 
| Hugh Dickins | ebebbbe | 2009-01-06 14:39:50 -0800 | [diff] [blame] | 191 |  | 
|  | 192 | if (unlikely(!si->cluster_nr--)) { | 
|  | 193 | if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { | 
|  | 194 | si->cluster_nr = SWAPFILE_CLUSTER - 1; | 
|  | 195 | goto checks; | 
|  | 196 | } | 
| Hugh Dickins | 7992fde | 2009-01-06 14:39:53 -0800 | [diff] [blame] | 197 | if (si->flags & SWP_DISCARDABLE) { | 
|  | 198 | /* | 
|  | 199 | * Start range check on racing allocations, in case | 
|  | 200 | * they overlap the cluster we eventually decide on | 
|  | 201 | * (we scan without swap_lock to allow preemption). | 
|  | 202 | * It's hardly conceivable that cluster_nr could be | 
|  | 203 | * wrapped during our scan, but don't depend on it. | 
|  | 204 | */ | 
|  | 205 | if (si->lowest_alloc) | 
|  | 206 | goto checks; | 
|  | 207 | si->lowest_alloc = si->max; | 
|  | 208 | si->highest_alloc = 0; | 
|  | 209 | } | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 210 | spin_unlock(&swap_lock); | 
| Hugh Dickins | 7dfad41 | 2005-09-03 15:54:38 -0700 | [diff] [blame] | 211 |  | 
| Hugh Dickins | c60aa17 | 2009-01-06 14:39:55 -0800 | [diff] [blame] | 212 | /* | 
|  | 213 | * If seek is expensive, start searching for new cluster from | 
|  | 214 | * start of partition, to minimize the span of allocated swap. | 
|  | 215 | * But if seek is cheap, search from our current position, so | 
|  | 216 | * that swap is allocated from all over the partition: if the | 
|  | 217 | * Flash Translation Layer only remaps within limited zones, | 
|  | 218 | * we don't want to wear out the first zone too quickly. | 
|  | 219 | */ | 
|  | 220 | if (!(si->flags & SWP_SOLIDSTATE)) | 
|  | 221 | scan_base = offset = si->lowest_bit; | 
| Hugh Dickins | 7dfad41 | 2005-09-03 15:54:38 -0700 | [diff] [blame] | 222 | last_in_cluster = offset + SWAPFILE_CLUSTER - 1; | 
|  | 223 |  | 
|  | 224 | /* Locate the first empty (unaligned) cluster */ | 
|  | 225 | for (; last_in_cluster <= si->highest_bit; offset++) { | 
|  | 226 | if (si->swap_map[offset]) | 
|  | 227 | last_in_cluster = offset + SWAPFILE_CLUSTER; | 
|  | 228 | else if (offset == last_in_cluster) { | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 229 | spin_lock(&swap_lock); | 
| Hugh Dickins | ebebbbe | 2009-01-06 14:39:50 -0800 | [diff] [blame] | 230 | offset -= SWAPFILE_CLUSTER - 1; | 
|  | 231 | si->cluster_next = offset; | 
|  | 232 | si->cluster_nr = SWAPFILE_CLUSTER - 1; | 
| Hugh Dickins | 7992fde | 2009-01-06 14:39:53 -0800 | [diff] [blame] | 233 | found_free_cluster = 1; | 
| Hugh Dickins | ebebbbe | 2009-01-06 14:39:50 -0800 | [diff] [blame] | 234 | goto checks; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 235 | } | 
| Hugh Dickins | 048c27f | 2005-09-03 15:54:40 -0700 | [diff] [blame] | 236 | if (unlikely(--latency_ration < 0)) { | 
|  | 237 | cond_resched(); | 
|  | 238 | latency_ration = LATENCY_LIMIT; | 
|  | 239 | } | 
| Hugh Dickins | 7dfad41 | 2005-09-03 15:54:38 -0700 | [diff] [blame] | 240 | } | 
| Hugh Dickins | ebebbbe | 2009-01-06 14:39:50 -0800 | [diff] [blame] | 241 |  | 
|  | 242 | offset = si->lowest_bit; | 
| Hugh Dickins | c60aa17 | 2009-01-06 14:39:55 -0800 | [diff] [blame] | 243 | last_in_cluster = offset + SWAPFILE_CLUSTER - 1; | 
|  | 244 |  | 
|  | 245 | /* Locate the first empty (unaligned) cluster */ | 
|  | 246 | for (; last_in_cluster < scan_base; offset++) { | 
|  | 247 | if (si->swap_map[offset]) | 
|  | 248 | last_in_cluster = offset + SWAPFILE_CLUSTER; | 
|  | 249 | else if (offset == last_in_cluster) { | 
|  | 250 | spin_lock(&swap_lock); | 
|  | 251 | offset -= SWAPFILE_CLUSTER - 1; | 
|  | 252 | si->cluster_next = offset; | 
|  | 253 | si->cluster_nr = SWAPFILE_CLUSTER - 1; | 
|  | 254 | found_free_cluster = 1; | 
|  | 255 | goto checks; | 
|  | 256 | } | 
|  | 257 | if (unlikely(--latency_ration < 0)) { | 
|  | 258 | cond_resched(); | 
|  | 259 | latency_ration = LATENCY_LIMIT; | 
|  | 260 | } | 
|  | 261 | } | 
|  | 262 |  | 
|  | 263 | offset = scan_base; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 264 | spin_lock(&swap_lock); | 
| Hugh Dickins | ebebbbe | 2009-01-06 14:39:50 -0800 | [diff] [blame] | 265 | si->cluster_nr = SWAPFILE_CLUSTER - 1; | 
| Hugh Dickins | 7992fde | 2009-01-06 14:39:53 -0800 | [diff] [blame] | 266 | si->lowest_alloc = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 267 | } | 
| Hugh Dickins | 7dfad41 | 2005-09-03 15:54:38 -0700 | [diff] [blame] | 268 |  | 
| Hugh Dickins | ebebbbe | 2009-01-06 14:39:50 -0800 | [diff] [blame] | 269 | checks: | 
|  | 270 | if (!(si->flags & SWP_WRITEOK)) | 
| Hugh Dickins | 52b7efdb | 2005-09-03 15:54:39 -0700 | [diff] [blame] | 271 | goto no_page; | 
| Hugh Dickins | 7dfad41 | 2005-09-03 15:54:38 -0700 | [diff] [blame] | 272 | if (!si->highest_bit) | 
|  | 273 | goto no_page; | 
| Hugh Dickins | ebebbbe | 2009-01-06 14:39:50 -0800 | [diff] [blame] | 274 | if (offset > si->highest_bit) | 
| Hugh Dickins | c60aa17 | 2009-01-06 14:39:55 -0800 | [diff] [blame] | 275 | scan_base = offset = si->lowest_bit; | 
| Hugh Dickins | ebebbbe | 2009-01-06 14:39:50 -0800 | [diff] [blame] | 276 | if (si->swap_map[offset]) | 
|  | 277 | goto scan; | 
| Hugh Dickins | 7dfad41 | 2005-09-03 15:54:38 -0700 | [diff] [blame] | 278 |  | 
| Hugh Dickins | ebebbbe | 2009-01-06 14:39:50 -0800 | [diff] [blame] | 279 | if (offset == si->lowest_bit) | 
|  | 280 | si->lowest_bit++; | 
|  | 281 | if (offset == si->highest_bit) | 
|  | 282 | si->highest_bit--; | 
|  | 283 | si->inuse_pages++; | 
|  | 284 | if (si->inuse_pages == si->pages) { | 
|  | 285 | si->lowest_bit = si->max; | 
|  | 286 | si->highest_bit = 0; | 
|  | 287 | } | 
|  | 288 | si->swap_map[offset] = 1; | 
|  | 289 | si->cluster_next = offset + 1; | 
|  | 290 | si->flags -= SWP_SCANNING; | 
| Hugh Dickins | 7992fde | 2009-01-06 14:39:53 -0800 | [diff] [blame] | 291 |  | 
|  | 292 | if (si->lowest_alloc) { | 
|  | 293 | /* | 
|  | 294 | * Only set when SWP_DISCARDABLE, and there's a scan | 
|  | 295 | * for a free cluster in progress or just completed. | 
|  | 296 | */ | 
|  | 297 | if (found_free_cluster) { | 
|  | 298 | /* | 
|  | 299 | * To optimize wear-levelling, discard the | 
|  | 300 | * old data of the cluster, taking care not to | 
|  | 301 | * discard any of its pages that have already | 
|  | 302 | * been allocated by racing tasks (offset has | 
|  | 303 | * already stepped over any at the beginning). | 
|  | 304 | */ | 
|  | 305 | if (offset < si->highest_alloc && | 
|  | 306 | si->lowest_alloc <= last_in_cluster) | 
|  | 307 | last_in_cluster = si->lowest_alloc - 1; | 
|  | 308 | si->flags |= SWP_DISCARDING; | 
|  | 309 | spin_unlock(&swap_lock); | 
|  | 310 |  | 
|  | 311 | if (offset < last_in_cluster) | 
|  | 312 | discard_swap_cluster(si, offset, | 
|  | 313 | last_in_cluster - offset + 1); | 
|  | 314 |  | 
|  | 315 | spin_lock(&swap_lock); | 
|  | 316 | si->lowest_alloc = 0; | 
|  | 317 | si->flags &= ~SWP_DISCARDING; | 
|  | 318 |  | 
|  | 319 | smp_mb();	/* wake_up_bit advises this */ | 
|  | 320 | wake_up_bit(&si->flags, ilog2(SWP_DISCARDING)); | 
|  | 321 |  | 
|  | 322 | } else if (si->flags & SWP_DISCARDING) { | 
|  | 323 | /* | 
|  | 324 | * Delay using pages allocated by racing tasks | 
|  | 325 | * until the whole discard has been issued. We | 
|  | 326 | * could defer that delay until swap_writepage, | 
|  | 327 | * but it's easier to keep this self-contained. | 
|  | 328 | */ | 
|  | 329 | spin_unlock(&swap_lock); | 
|  | 330 | wait_on_bit(&si->flags, ilog2(SWP_DISCARDING), | 
|  | 331 | wait_for_discard, TASK_UNINTERRUPTIBLE); | 
|  | 332 | spin_lock(&swap_lock); | 
|  | 333 | } else { | 
|  | 334 | /* | 
|  | 335 | * Note pages allocated by racing tasks while | 
|  | 336 | * a scan for a free cluster is in progress, so | 
|  | 337 | * that its final discard can exclude them. | 
|  | 338 | */ | 
|  | 339 | if (offset < si->lowest_alloc) | 
|  | 340 | si->lowest_alloc = offset; | 
|  | 341 | if (offset > si->highest_alloc) | 
|  | 342 | si->highest_alloc = offset; | 
|  | 343 | } | 
|  | 344 | } | 
| Hugh Dickins | ebebbbe | 2009-01-06 14:39:50 -0800 | [diff] [blame] | 345 | return offset; | 
|  | 346 |  | 
|  | 347 | scan: | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 348 | spin_unlock(&swap_lock); | 
| Hugh Dickins | 7dfad41 | 2005-09-03 15:54:38 -0700 | [diff] [blame] | 349 | while (++offset <= si->highest_bit) { | 
| Hugh Dickins | 52b7efdb | 2005-09-03 15:54:39 -0700 | [diff] [blame] | 350 | if (!si->swap_map[offset]) { | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 351 | spin_lock(&swap_lock); | 
| Hugh Dickins | 52b7efdb | 2005-09-03 15:54:39 -0700 | [diff] [blame] | 352 | goto checks; | 
|  | 353 | } | 
| Hugh Dickins | 048c27f | 2005-09-03 15:54:40 -0700 | [diff] [blame] | 354 | if (unlikely(--latency_ration < 0)) { | 
|  | 355 | cond_resched(); | 
|  | 356 | latency_ration = LATENCY_LIMIT; | 
|  | 357 | } | 
| Hugh Dickins | 7dfad41 | 2005-09-03 15:54:38 -0700 | [diff] [blame] | 358 | } | 
| Hugh Dickins | c60aa17 | 2009-01-06 14:39:55 -0800 | [diff] [blame] | 359 | offset = si->lowest_bit; | 
|  | 360 | while (++offset < scan_base) { | 
|  | 361 | if (!si->swap_map[offset]) { | 
|  | 362 | spin_lock(&swap_lock); | 
|  | 363 | goto checks; | 
|  | 364 | } | 
|  | 365 | if (unlikely(--latency_ration < 0)) { | 
|  | 366 | cond_resched(); | 
|  | 367 | latency_ration = LATENCY_LIMIT; | 
|  | 368 | } | 
|  | 369 | } | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 370 | spin_lock(&swap_lock); | 
| Hugh Dickins | 7dfad41 | 2005-09-03 15:54:38 -0700 | [diff] [blame] | 371 |  | 
|  | 372 | no_page: | 
| Hugh Dickins | 52b7efdb | 2005-09-03 15:54:39 -0700 | [diff] [blame] | 373 | si->flags -= SWP_SCANNING; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 374 | return 0; | 
|  | 375 | } | 
|  | 376 |  | 
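|  |  | /* | 
|  |  | * Allocate one swap entry, rotating among swap areas of equal priority; | 
|  |  | * returns (swp_entry_t){0} when no swap is available. | 
|  |  | */ | 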
|  | 377 | swp_entry_t get_swap_page(void) | 
|  | 378 | { | 
| Hugh Dickins | fb4f88d | 2005-09-03 15:54:37 -0700 | [diff] [blame] | 379 | struct swap_info_struct *si; | 
|  | 380 | pgoff_t offset; | 
|  | 381 | int type, next; | 
|  | 382 | int wrapped = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 383 |  | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 384 | spin_lock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 385 | if (nr_swap_pages <= 0) | 
| Hugh Dickins | fb4f88d | 2005-09-03 15:54:37 -0700 | [diff] [blame] | 386 | goto noswap; | 
|  | 387 | nr_swap_pages--; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 388 |  | 
| Hugh Dickins | fb4f88d | 2005-09-03 15:54:37 -0700 | [diff] [blame] | 389 | for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { | 
|  | 390 | si = swap_info + type; | 
|  | 391 | next = si->next; | 
|  | 392 | if (next < 0 || | 
|  | 393 | (!wrapped && si->prio != swap_info[next].prio)) { | 
|  | 394 | next = swap_list.head; | 
|  | 395 | wrapped++; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 396 | } | 
| Hugh Dickins | fb4f88d | 2005-09-03 15:54:37 -0700 | [diff] [blame] | 397 |  | 
|  | 398 | if (!si->highest_bit) | 
|  | 399 | continue; | 
|  | 400 | if (!(si->flags & SWP_WRITEOK)) | 
|  | 401 | continue; | 
|  | 402 |  | 
|  | 403 | swap_list.next = next; | 
| Hugh Dickins | fb4f88d | 2005-09-03 15:54:37 -0700 | [diff] [blame] | 404 | offset = scan_swap_map(si); | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 405 | if (offset) { | 
|  | 406 | spin_unlock(&swap_lock); | 
| Hugh Dickins | fb4f88d | 2005-09-03 15:54:37 -0700 | [diff] [blame] | 407 | return swp_entry(type, offset); | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 408 | } | 
| Hugh Dickins | fb4f88d | 2005-09-03 15:54:37 -0700 | [diff] [blame] | 409 | next = swap_list.next; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 410 | } | 
| Hugh Dickins | fb4f88d | 2005-09-03 15:54:37 -0700 | [diff] [blame] | 411 |  | 
|  | 412 | nr_swap_pages++; | 
|  | 413 | noswap: | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 414 | spin_unlock(&swap_lock); | 
| Hugh Dickins | fb4f88d | 2005-09-03 15:54:37 -0700 | [diff] [blame] | 415 | return (swp_entry_t) {0}; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 416 | } | 
|  | 417 |  | 
| Rafael J. Wysocki | 3a291a2 | 2006-01-06 00:16:37 -0800 | [diff] [blame] | 418 | swp_entry_t get_swap_page_of_type(int type) | 
|  | 419 | { | 
|  | 420 | struct swap_info_struct *si; | 
|  | 421 | pgoff_t offset; | 
|  | 422 |  | 
|  | 423 | spin_lock(&swap_lock); | 
|  | 424 | si = swap_info + type; | 
|  | 425 | if (si->flags & SWP_WRITEOK) { | 
|  | 426 | nr_swap_pages--; | 
|  | 427 | offset = scan_swap_map(si); | 
|  | 428 | if (offset) { | 
|  | 429 | spin_unlock(&swap_lock); | 
|  | 430 | return swp_entry(type, offset); | 
|  | 431 | } | 
|  | 432 | nr_swap_pages++; | 
|  | 433 | } | 
|  | 434 | spin_unlock(&swap_lock); | 
|  | 435 | return (swp_entry_t) {0}; | 
|  | 436 | } | 
|  | 437 |  | 
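|  |  | /* | 
|  |  | * Validate @entry and return its swap area with swap_lock held, or NULL | 
|  |  | * (after logging the reason) if the entry is bad or unused. | 
|  |  | */ | 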
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 438 | static struct swap_info_struct * swap_info_get(swp_entry_t entry) | 
|  | 439 | { | 
|  | 440 | struct swap_info_struct * p; | 
|  | 441 | unsigned long offset, type; | 
|  | 442 |  | 
|  | 443 | if (!entry.val) | 
|  | 444 | goto out; | 
|  | 445 | type = swp_type(entry); | 
|  | 446 | if (type >= nr_swapfiles) | 
|  | 447 | goto bad_nofile; | 
|  | 448 | p = & swap_info[type]; | 
|  | 449 | if (!(p->flags & SWP_USED)) | 
|  | 450 | goto bad_device; | 
|  | 451 | offset = swp_offset(entry); | 
|  | 452 | if (offset >= p->max) | 
|  | 453 | goto bad_offset; | 
|  | 454 | if (!p->swap_map[offset]) | 
|  | 455 | goto bad_free; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 456 | spin_lock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 457 | return p; | 
|  | 458 |  | 
|  | 459 | bad_free: | 
|  | 460 | printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val); | 
|  | 461 | goto out; | 
|  | 462 | bad_offset: | 
|  | 463 | printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val); | 
|  | 464 | goto out; | 
|  | 465 | bad_device: | 
|  | 466 | printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val); | 
|  | 467 | goto out; | 
|  | 468 | bad_nofile: | 
|  | 469 | printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val); | 
|  | 470 | out: | 
|  | 471 | return NULL; | 
| Hugh Dickins | 886bb7e | 2009-01-06 14:39:48 -0800 | [diff] [blame] | 472 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 473 |  | 
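|  |  | /* | 
|  |  | * Drop one reference to the swap slot of @ent (unless its count has | 
|  |  | * overflowed to SWAP_MAP_MAX).  When the count reaches zero the slot | 
|  |  | * rejoins the free range and its memcg swap charge is released. | 
|  |  | * Returns the remaining count.  Caller holds swap_lock. | 
|  |  | */ | 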
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 474 | static int swap_entry_free(struct swap_info_struct *p, swp_entry_t ent) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 475 | { | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 476 | unsigned long offset = swp_offset(ent); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 477 | int count = p->swap_map[offset]; | 
|  | 478 |  | 
|  | 479 | if (count < SWAP_MAP_MAX) { | 
|  | 480 | count--; | 
|  | 481 | p->swap_map[offset] = count; | 
|  | 482 | if (!count) { | 
|  | 483 | if (offset < p->lowest_bit) | 
|  | 484 | p->lowest_bit = offset; | 
|  | 485 | if (offset > p->highest_bit) | 
|  | 486 | p->highest_bit = offset; | 
| Hugh Dickins | 89d09a2 | 2005-09-03 15:54:36 -0700 | [diff] [blame] | 487 | if (p->prio > swap_info[swap_list.next].prio) | 
|  | 488 | swap_list.next = p - swap_info; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 489 | nr_swap_pages++; | 
|  | 490 | p->inuse_pages--; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 491 | mem_cgroup_uncharge_swap(ent); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 492 | } | 
|  | 493 | } | 
|  | 494 | return count; | 
|  | 495 | } | 
|  | 496 |  | 
|  | 497 | /* | 
|  | 498 | * Caller has made sure that the swapdevice corresponding to entry | 
|  | 499 | * is still around or has not been recycled. | 
|  | 500 | */ | 
|  | 501 | void swap_free(swp_entry_t entry) | 
|  | 502 | { | 
|  | 503 | struct swap_info_struct * p; | 
|  | 504 |  | 
|  | 505 | p = swap_info_get(entry); | 
|  | 506 | if (p) { | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 507 | swap_entry_free(p, entry); | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 508 | spin_unlock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 509 | } | 
|  | 510 | } | 
|  | 511 |  | 
|  | 512 | /* | 
| Hugh Dickins | c475a8a | 2005-06-21 17:15:12 -0700 | [diff] [blame] | 513 | * How many references to page are currently swapped out? | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 514 | */ | 
| Hugh Dickins | c475a8a | 2005-06-21 17:15:12 -0700 | [diff] [blame] | 515 | static inline int page_swapcount(struct page *page) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 516 | { | 
| Hugh Dickins | c475a8a | 2005-06-21 17:15:12 -0700 | [diff] [blame] | 517 | int count = 0; | 
|  | 518 | struct swap_info_struct *p; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 519 | swp_entry_t entry; | 
|  | 520 |  | 
| Hugh Dickins | 4c21e2f | 2005-10-29 18:16:40 -0700 | [diff] [blame] | 521 | entry.val = page_private(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 522 | p = swap_info_get(entry); | 
|  | 523 | if (p) { | 
| Hugh Dickins | c475a8a | 2005-06-21 17:15:12 -0700 | [diff] [blame] | 524 | /* Subtract the 1 for the swap cache itself */ | 
|  | 525 | count = p->swap_map[swp_offset(entry)] - 1; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 526 | spin_unlock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 527 | } | 
| Hugh Dickins | c475a8a | 2005-06-21 17:15:12 -0700 | [diff] [blame] | 528 | return count; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 529 | } | 
|  | 530 |  | 
|  | 531 | /* | 
| Hugh Dickins | 7b1fe59 | 2009-01-06 14:39:34 -0800 | [diff] [blame] | 532 | * We can write to an anon page without COW if there are no other references | 
|  | 533 | * to it.  And as a side-effect, free up its swap: because the old content | 
|  | 534 | * on disk will never be read, and seeking back there to write new content | 
|  | 535 | * later would only waste time away from clustering. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 536 | */ | 
| Hugh Dickins | 7b1fe59 | 2009-01-06 14:39:34 -0800 | [diff] [blame] | 537 | int reuse_swap_page(struct page *page) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 538 | { | 
| Hugh Dickins | c475a8a | 2005-06-21 17:15:12 -0700 | [diff] [blame] | 539 | int count; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 540 |  | 
| Hugh Dickins | 51726b1 | 2009-01-06 14:39:25 -0800 | [diff] [blame] | 541 | VM_BUG_ON(!PageLocked(page)); | 
| Hugh Dickins | c475a8a | 2005-06-21 17:15:12 -0700 | [diff] [blame] | 542 | count = page_mapcount(page); | 
| Hugh Dickins | 7b1fe59 | 2009-01-06 14:39:34 -0800 | [diff] [blame] | 543 | if (count <= 1 && PageSwapCache(page)) { | 
| Hugh Dickins | c475a8a | 2005-06-21 17:15:12 -0700 | [diff] [blame] | 544 | count += page_swapcount(page); | 
| Hugh Dickins | 7b1fe59 | 2009-01-06 14:39:34 -0800 | [diff] [blame] | 545 | if (count == 1 && !PageWriteback(page)) { | 
|  | 546 | delete_from_swap_cache(page); | 
|  | 547 | SetPageDirty(page); | 
|  | 548 | } | 
|  | 549 | } | 
| Hugh Dickins | c475a8a | 2005-06-21 17:15:12 -0700 | [diff] [blame] | 550 | return count == 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 551 | } | 
|  | 552 |  | 
|  | 553 | /* | 
| Hugh Dickins | a2c43ee | 2009-01-06 14:39:36 -0800 | [diff] [blame] | 554 | * If swap is getting full, or if there are no more mappings of this page, | 
|  | 555 | * then try_to_free_swap is called to free its swap space. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 556 | */ | 
| Hugh Dickins | a2c43ee | 2009-01-06 14:39:36 -0800 | [diff] [blame] | 557 | int try_to_free_swap(struct page *page) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 558 | { | 
| Hugh Dickins | 51726b1 | 2009-01-06 14:39:25 -0800 | [diff] [blame] | 559 | VM_BUG_ON(!PageLocked(page)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 560 |  | 
|  | 561 | if (!PageSwapCache(page)) | 
|  | 562 | return 0; | 
|  | 563 | if (PageWriteback(page)) | 
|  | 564 | return 0; | 
| Hugh Dickins | a2c43ee | 2009-01-06 14:39:36 -0800 | [diff] [blame] | 565 | if (page_swapcount(page)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 566 | return 0; | 
|  | 567 |  | 
| Hugh Dickins | a2c43ee | 2009-01-06 14:39:36 -0800 | [diff] [blame] | 568 | delete_from_swap_cache(page); | 
|  | 569 | SetPageDirty(page); | 
|  | 570 | return 1; | 
| Rik van Riel | 68a22394 | 2008-10-18 20:26:23 -0700 | [diff] [blame] | 571 | } | 
|  | 572 |  | 
|  | 573 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 574 | * Free the swap entry like above, but also try to | 
|  | 575 | * free the page cache entry if it is the last user. | 
|  | 576 | */ | 
| Hugh Dickins | 2509ef2 | 2009-01-06 14:40:10 -0800 | [diff] [blame] | 577 | int free_swap_and_cache(swp_entry_t entry) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 578 | { | 
| Hugh Dickins | 2509ef2 | 2009-01-06 14:40:10 -0800 | [diff] [blame] | 579 | struct swap_info_struct *p; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 580 | struct page *page = NULL; | 
|  | 581 |  | 
| Christoph Lameter | 0697212 | 2006-06-23 02:03:35 -0700 | [diff] [blame] | 582 | if (is_migration_entry(entry)) | 
| Hugh Dickins | 2509ef2 | 2009-01-06 14:40:10 -0800 | [diff] [blame] | 583 | return 1; | 
| Christoph Lameter | 0697212 | 2006-06-23 02:03:35 -0700 | [diff] [blame] | 584 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 585 | p = swap_info_get(entry); | 
|  | 586 | if (p) { | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 587 | if (swap_entry_free(p, entry) == 1) { | 
| Nick Piggin | 93fac70 | 2006-03-31 02:29:56 -0800 | [diff] [blame] | 588 | page = find_get_page(&swapper_space, entry.val); | 
| Nick Piggin | 8413ac9 | 2008-10-18 20:26:59 -0700 | [diff] [blame] | 589 | if (page && !trylock_page(page)) { | 
| Nick Piggin | 93fac70 | 2006-03-31 02:29:56 -0800 | [diff] [blame] | 590 | page_cache_release(page); | 
|  | 591 | page = NULL; | 
|  | 592 | } | 
|  | 593 | } | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 594 | spin_unlock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 595 | } | 
|  | 596 | if (page) { | 
| Hugh Dickins | a2c43ee | 2009-01-06 14:39:36 -0800 | [diff] [blame] | 597 | /* | 
|  | 598 | * Not mapped elsewhere, or swap space full? Free it! | 
|  | 599 | * Also recheck PageSwapCache now page is locked (above). | 
|  | 600 | */ | 
| Nick Piggin | 93fac70 | 2006-03-31 02:29:56 -0800 | [diff] [blame] | 601 | if (PageSwapCache(page) && !PageWriteback(page) && | 
| Hugh Dickins | a2c43ee | 2009-01-06 14:39:36 -0800 | [diff] [blame] | 602 | (!page_mapped(page) || vm_swap_full())) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 603 | delete_from_swap_cache(page); | 
|  | 604 | SetPageDirty(page); | 
|  | 605 | } | 
|  | 606 | unlock_page(page); | 
|  | 607 | page_cache_release(page); | 
|  | 608 | } | 
| Hugh Dickins | 2509ef2 | 2009-01-06 14:40:10 -0800 | [diff] [blame] | 609 | return p != NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 610 | } | 
|  | 611 |  | 
| Rafael J. Wysocki | b0cb1a1 | 2007-07-29 23:24:36 +0200 | [diff] [blame] | 612 | #ifdef CONFIG_HIBERNATION | 
| Rafael J. Wysocki | f577eb3 | 2006-03-23 02:59:59 -0800 | [diff] [blame] | 613 | /* | 
| Rafael J. Wysocki | 915bae9 | 2006-12-06 20:34:07 -0800 | [diff] [blame] | 614 | * Find the swap type that corresponds to the given device (if any). | 
| Rafael J. Wysocki | f577eb3 | 2006-03-23 02:59:59 -0800 | [diff] [blame] | 615 | * | 
| Rafael J. Wysocki | 915bae9 | 2006-12-06 20:34:07 -0800 | [diff] [blame] | 616 | * @offset - number of the PAGE_SIZE-sized block of the device, starting | 
|  | 617 | * from 0, in which the swap header is expected to be located. | 
|  | 618 | * | 
|  | 619 | * This is needed for the suspend to disk (aka swsusp). | 
| Rafael J. Wysocki | f577eb3 | 2006-03-23 02:59:59 -0800 | [diff] [blame] | 620 | */ | 
| Rafael J. Wysocki | 7bf2368 | 2007-01-05 16:36:28 -0800 | [diff] [blame] | 621 | int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) | 
| Rafael J. Wysocki | f577eb3 | 2006-03-23 02:59:59 -0800 | [diff] [blame] | 622 | { | 
| Rafael J. Wysocki | 915bae9 | 2006-12-06 20:34:07 -0800 | [diff] [blame] | 623 | struct block_device *bdev = NULL; | 
| Rafael J. Wysocki | f577eb3 | 2006-03-23 02:59:59 -0800 | [diff] [blame] | 624 | int i; | 
|  | 625 |  | 
| Rafael J. Wysocki | 915bae9 | 2006-12-06 20:34:07 -0800 | [diff] [blame] | 626 | if (device) | 
|  | 627 | bdev = bdget(device); | 
|  | 628 |  | 
| Rafael J. Wysocki | f577eb3 | 2006-03-23 02:59:59 -0800 | [diff] [blame] | 629 | spin_lock(&swap_lock); | 
|  | 630 | for (i = 0; i < nr_swapfiles; i++) { | 
| Rafael J. Wysocki | 915bae9 | 2006-12-06 20:34:07 -0800 | [diff] [blame] | 631 | struct swap_info_struct *sis = swap_info + i; | 
| Rafael J. Wysocki | f577eb3 | 2006-03-23 02:59:59 -0800 | [diff] [blame] | 632 |  | 
| Rafael J. Wysocki | 915bae9 | 2006-12-06 20:34:07 -0800 | [diff] [blame] | 633 | if (!(sis->flags & SWP_WRITEOK)) | 
| Rafael J. Wysocki | f577eb3 | 2006-03-23 02:59:59 -0800 | [diff] [blame] | 634 | continue; | 
| Rafael J. Wysocki | b6b5bce | 2006-08-27 01:23:25 -0700 | [diff] [blame] | 635 |  | 
| Rafael J. Wysocki | 915bae9 | 2006-12-06 20:34:07 -0800 | [diff] [blame] | 636 | if (!bdev) { | 
| Rafael J. Wysocki | 7bf2368 | 2007-01-05 16:36:28 -0800 | [diff] [blame] | 637 | if (bdev_p) | 
|  | 638 | *bdev_p = sis->bdev; | 
|  | 639 |  | 
| Rafael J. Wysocki | 6e1819d | 2006-03-23 03:00:03 -0800 | [diff] [blame] | 640 | spin_unlock(&swap_lock); | 
|  | 641 | return i; | 
|  | 642 | } | 
| Rafael J. Wysocki | 915bae9 | 2006-12-06 20:34:07 -0800 | [diff] [blame] | 643 | if (bdev == sis->bdev) { | 
|  | 644 | struct swap_extent *se; | 
|  | 645 |  | 
|  | 646 | se = list_entry(sis->extent_list.next, | 
|  | 647 | struct swap_extent, list); | 
|  | 648 | if (se->start_block == offset) { | 
| Rafael J. Wysocki | 7bf2368 | 2007-01-05 16:36:28 -0800 | [diff] [blame] | 649 | if (bdev_p) | 
|  | 650 | *bdev_p = sis->bdev; | 
|  | 651 |  | 
| Rafael J. Wysocki | 915bae9 | 2006-12-06 20:34:07 -0800 | [diff] [blame] | 652 | spin_unlock(&swap_lock); | 
|  | 653 | bdput(bdev); | 
|  | 654 | return i; | 
|  | 655 | } | 
| Rafael J. Wysocki | f577eb3 | 2006-03-23 02:59:59 -0800 | [diff] [blame] | 656 | } | 
|  | 657 | } | 
|  | 658 | spin_unlock(&swap_lock); | 
| Rafael J. Wysocki | 915bae9 | 2006-12-06 20:34:07 -0800 | [diff] [blame] | 659 | if (bdev) | 
|  | 660 | bdput(bdev); | 
|  | 661 |  | 
| Rafael J. Wysocki | f577eb3 | 2006-03-23 02:59:59 -0800 | [diff] [blame] | 662 | return -ENODEV; | 
|  | 663 | } | 
|  | 664 |  | 
|  | 665 | /* | 
|  | 666 | * Return either the total number of swap pages of given type, or the number | 
|  | 667 | * of free pages of that type (depending on @free) | 
|  | 668 | * | 
|  | 669 | * This is needed for software suspend | 
|  | 670 | */ | 
|  | 671 | unsigned int count_swap_pages(int type, int free) | 
|  | 672 | { | 
|  | 673 | unsigned int n = 0; | 
|  | 674 |  | 
|  | 675 | if (type < nr_swapfiles) { | 
|  | 676 | spin_lock(&swap_lock); | 
|  | 677 | if (swap_info[type].flags & SWP_WRITEOK) { | 
|  | 678 | n = swap_info[type].pages; | 
|  | 679 | if (free) | 
|  | 680 | n -= swap_info[type].inuse_pages; | 
|  | 681 | } | 
|  | 682 | spin_unlock(&swap_lock); | 
|  | 683 | } | 
|  | 684 | return n; | 
|  | 685 | } | 
|  | 686 | #endif | 
|  | 687 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 688 | /* | 
| Hugh Dickins | 72866f6 | 2005-10-29 18:15:55 -0700 | [diff] [blame] | 689 | * No need to decide whether this PTE shares the swap entry with others, | 
|  | 690 | * just let do_wp_page work it out if a write is requested later - to | 
|  | 691 | * force COW, vm_page_prot omits write permission from any private vma. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 692 | */ | 
| Hugh Dickins | 044d66c | 2008-02-07 00:14:04 -0800 | [diff] [blame] | 693 | static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 694 | unsigned long addr, swp_entry_t entry, struct page *page) | 
|  | 695 | { | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 696 | struct mem_cgroup *ptr = NULL; | 
| Hugh Dickins | 044d66c | 2008-02-07 00:14:04 -0800 | [diff] [blame] | 697 | spinlock_t *ptl; | 
|  | 698 | pte_t *pte; | 
|  | 699 | int ret = 1; | 
|  | 700 |  | 
| KAMEZAWA Hiroyuki | 2c26fdd | 2009-01-07 18:08:10 -0800 | [diff] [blame] | 701 | if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) | 
| Hugh Dickins | 044d66c | 2008-02-07 00:14:04 -0800 | [diff] [blame] | 702 | ret = -ENOMEM; | 
|  | 703 |  | 
|  | 704 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 
|  | 705 | if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { | 
|  | 706 | if (ret > 0) | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 707 | mem_cgroup_cancel_charge_swapin(ptr); | 
| Hugh Dickins | 044d66c | 2008-02-07 00:14:04 -0800 | [diff] [blame] | 708 | ret = 0; | 
|  | 709 | goto out; | 
|  | 710 | } | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 711 |  | 
| Hugh Dickins | 4294621 | 2005-10-29 18:16:05 -0700 | [diff] [blame] | 712 | inc_mm_counter(vma->vm_mm, anon_rss); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 713 | get_page(page); | 
|  | 714 | set_pte_at(vma->vm_mm, addr, pte, | 
|  | 715 | pte_mkold(mk_pte(page, vma->vm_page_prot))); | 
|  | 716 | page_add_anon_rmap(page, vma, addr); | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 717 | mem_cgroup_commit_charge_swapin(page, ptr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 718 | swap_free(entry); | 
|  | 719 | /* | 
|  | 720 | * Move the page to the active list so it is not | 
|  | 721 | * immediately swapped out again after swapon. | 
|  | 722 | */ | 
|  | 723 | activate_page(page); | 
| Hugh Dickins | 044d66c | 2008-02-07 00:14:04 -0800 | [diff] [blame] | 724 | out: | 
|  | 725 | pte_unmap_unlock(pte, ptl); | 
|  | 726 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 727 | } | 
|  | 728 |  | 
|  | 729 | static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | 
|  | 730 | unsigned long addr, unsigned long end, | 
|  | 731 | swp_entry_t entry, struct page *page) | 
|  | 732 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 733 | pte_t swp_pte = swp_entry_to_pte(entry); | 
| Hugh Dickins | 705e87c | 2005-10-29 18:16:27 -0700 | [diff] [blame] | 734 | pte_t *pte; | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 735 | int ret = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 736 |  | 
| Hugh Dickins | 044d66c | 2008-02-07 00:14:04 -0800 | [diff] [blame] | 737 | /* | 
|  | 738 | * We don't actually need pte lock while scanning for swp_pte: since | 
|  | 739 | * we hold page lock and mmap_sem, swp_pte cannot be inserted into the | 
|  | 740 | * page table while we're scanning; though it could get zapped, and on | 
|  | 741 | * some architectures (e.g. x86_32 with PAE) we might catch a glimpse | 
|  | 742 | * of unmatched parts which look like swp_pte, so unuse_pte must | 
|  | 743 | * recheck under pte lock.  Scanning without pte lock lets it be | 
|  | 744 | * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE. | 
|  | 745 | */ | 
|  | 746 | pte = pte_offset_map(pmd, addr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 747 | do { | 
|  | 748 | /* | 
|  | 749 | * swapoff spends a _lot_ of time in this loop! | 
|  | 750 | * Test inline before going to call unuse_pte. | 
|  | 751 | */ | 
|  | 752 | if (unlikely(pte_same(*pte, swp_pte))) { | 
| Hugh Dickins | 044d66c | 2008-02-07 00:14:04 -0800 | [diff] [blame] | 753 | pte_unmap(pte); | 
|  | 754 | ret = unuse_pte(vma, pmd, addr, entry, page); | 
|  | 755 | if (ret) | 
|  | 756 | goto out; | 
|  | 757 | pte = pte_offset_map(pmd, addr); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 758 | } | 
|  | 759 | } while (pte++, addr += PAGE_SIZE, addr != end); | 
| Hugh Dickins | 044d66c | 2008-02-07 00:14:04 -0800 | [diff] [blame] | 760 | pte_unmap(pte - 1); | 
|  | 761 | out: | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 762 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 763 | } | 
|  | 764 |  | 
|  | 765 | static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, | 
|  | 766 | unsigned long addr, unsigned long end, | 
|  | 767 | swp_entry_t entry, struct page *page) | 
|  | 768 | { | 
|  | 769 | pmd_t *pmd; | 
|  | 770 | unsigned long next; | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 771 | int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 772 |  | 
|  | 773 | pmd = pmd_offset(pud, addr); | 
|  | 774 | do { | 
|  | 775 | next = pmd_addr_end(addr, end); | 
|  | 776 | if (pmd_none_or_clear_bad(pmd)) | 
|  | 777 | continue; | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 778 | ret = unuse_pte_range(vma, pmd, addr, next, entry, page); | 
|  | 779 | if (ret) | 
|  | 780 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 781 | } while (pmd++, addr = next, addr != end); | 
|  | 782 | return 0; | 
|  | 783 | } | 
|  | 784 |  | 
|  | 785 | static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd, | 
|  | 786 | unsigned long addr, unsigned long end, | 
|  | 787 | swp_entry_t entry, struct page *page) | 
|  | 788 | { | 
|  | 789 | pud_t *pud; | 
|  | 790 | unsigned long next; | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 791 | int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 792 |  | 
|  | 793 | pud = pud_offset(pgd, addr); | 
|  | 794 | do { | 
|  | 795 | next = pud_addr_end(addr, end); | 
|  | 796 | if (pud_none_or_clear_bad(pud)) | 
|  | 797 | continue; | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 798 | ret = unuse_pmd_range(vma, pud, addr, next, entry, page); | 
|  | 799 | if (ret) | 
|  | 800 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 801 | } while (pud++, addr = next, addr != end); | 
|  | 802 | return 0; | 
|  | 803 | } | 
|  | 804 |  | 
|  | 805 | static int unuse_vma(struct vm_area_struct *vma, | 
|  | 806 | swp_entry_t entry, struct page *page) | 
|  | 807 | { | 
|  | 808 | pgd_t *pgd; | 
|  | 809 | unsigned long addr, end, next; | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 810 | int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 811 |  | 
|  | 812 | if (page->mapping) { | 
|  | 813 | addr = page_address_in_vma(page, vma); | 
|  | 814 | if (addr == -EFAULT) | 
|  | 815 | return 0; | 
|  | 816 | else | 
|  | 817 | end = addr + PAGE_SIZE; | 
|  | 818 | } else { | 
|  | 819 | addr = vma->vm_start; | 
|  | 820 | end = vma->vm_end; | 
|  | 821 | } | 
|  | 822 |  | 
|  | 823 | pgd = pgd_offset(vma->vm_mm, addr); | 
|  | 824 | do { | 
|  | 825 | next = pgd_addr_end(addr, end); | 
|  | 826 | if (pgd_none_or_clear_bad(pgd)) | 
|  | 827 | continue; | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 828 | ret = unuse_pud_range(vma, pgd, addr, next, entry, page); | 
|  | 829 | if (ret) | 
|  | 830 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 831 | } while (pgd++, addr = next, addr != end); | 
|  | 832 | return 0; | 
|  | 833 | } | 
|  | 834 |  | 
|  | 835 | static int unuse_mm(struct mm_struct *mm, | 
|  | 836 | swp_entry_t entry, struct page *page) | 
|  | 837 | { | 
|  | 838 | struct vm_area_struct *vma; | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 839 | int ret = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 840 |  | 
|  | 841 | if (!down_read_trylock(&mm->mmap_sem)) { | 
|  | 842 | /* | 
| Fernando Luis Vazquez Cao | 7d03431 | 2008-07-29 22:33:41 -0700 | [diff] [blame] | 843 | * Activate page so shrink_inactive_list is unlikely to unmap | 
|  | 844 | * its ptes while lock is dropped, so swapoff can make progress. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 845 | */ | 
| Hugh Dickins | c475a8a | 2005-06-21 17:15:12 -0700 | [diff] [blame] | 846 | activate_page(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 847 | unlock_page(page); | 
|  | 848 | down_read(&mm->mmap_sem); | 
|  | 849 | lock_page(page); | 
|  | 850 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 851 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 852 | if (vma->anon_vma && (ret = unuse_vma(vma, entry, page))) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 853 | break; | 
|  | 854 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 855 | up_read(&mm->mmap_sem); | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 856 | return (ret < 0)? ret: 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 857 | } | 
|  | 858 |  | 
|  | 859 | /* | 
|  | 860 | * Scan swap_map from current position to next entry still in use. | 
|  | 861 | * Recycle to start on reaching the end, returning 0 when empty. | 
|  | 862 | */ | 
| Hugh Dickins | 6eb396d | 2005-09-03 15:54:35 -0700 | [diff] [blame] | 863 | static unsigned int find_next_to_unuse(struct swap_info_struct *si, | 
|  | 864 | unsigned int prev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 865 | { | 
| Hugh Dickins | 6eb396d | 2005-09-03 15:54:35 -0700 | [diff] [blame] | 866 | unsigned int max = si->max; | 
|  | 867 | unsigned int i = prev; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 868 | int count; | 
|  | 869 |  | 
|  | 870 | /* | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 871 | * No need for swap_lock here: we're just looking | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 872 | * for whether an entry is in use, not modifying it; false | 
|  | 873 | * hits are okay, and sys_swapoff() has already prevented new | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 874 | * allocations from this area (while holding swap_lock). | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 875 | */ | 
|  | 876 | for (;;) { | 
|  | 877 | if (++i >= max) { | 
|  | 878 | if (!prev) { | 
|  | 879 | i = 0; | 
|  | 880 | break; | 
|  | 881 | } | 
|  | 882 | /* | 
|  | 883 | * No entries in use at top of swap_map, | 
|  | 884 | * loop back to start and recheck there. | 
|  | 885 | */ | 
|  | 886 | max = prev + 1; | 
|  | 887 | prev = 0; | 
|  | 888 | i = 1; | 
|  | 889 | } | 
|  | 890 | count = si->swap_map[i]; | 
|  | 891 | if (count && count != SWAP_MAP_BAD) | 
|  | 892 | break; | 
|  | 893 | } | 
|  | 894 | return i; | 
|  | 895 | } | 
|  | 896 |  | 
|  | 897 | /* | 
|  | 898 | * We completely avoid races by reading each swap page in advance, | 
|  | 899 | * and then search for the process using it.  All the necessary | 
|  | 900 | * page table adjustments can then be made atomically. | 
|  | 901 | */ | 
|  | 902 | static int try_to_unuse(unsigned int type) | 
|  | 903 | { | 
|  | 904 | struct swap_info_struct * si = &swap_info[type]; | 
|  | 905 | struct mm_struct *start_mm; | 
|  | 906 | unsigned short *swap_map; | 
|  | 907 | unsigned short swcount; | 
|  | 908 | struct page *page; | 
|  | 909 | swp_entry_t entry; | 
| Hugh Dickins | 6eb396d | 2005-09-03 15:54:35 -0700 | [diff] [blame] | 910 | unsigned int i = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 911 | int retval = 0; | 
|  | 912 | int reset_overflow = 0; | 
|  | 913 | int shmem; | 
|  | 914 |  | 
|  | 915 | /* | 
|  | 916 | * When searching mms for an entry, a good strategy is to | 
|  | 917 | * start at the first mm we freed the previous entry from | 
|  | 918 | * (though actually we don't notice whether we or coincidence | 
|  | 919 | * freed the entry).  Initialize this start_mm with a hold. | 
|  | 920 | * | 
|  | 921 | * A simpler strategy would be to start at the last mm we | 
|  | 922 | * freed the previous entry from; but that would take less | 
|  | 923 | * advantage of mmlist ordering, which clusters forked mms | 
|  | 924 | * together, child after parent.  If we race with dup_mmap(), we | 
|  | 925 | * prefer to resolve parent before child, lest we miss entries | 
|  | 926 | * duplicated after we scanned child: using last mm would invert | 
|  | 927 | * that.  Though it's only a serious concern when an overflowed | 
|  | 928 | * swap count is reset from SWAP_MAP_MAX, preventing a rescan. | 
|  | 929 | */ | 
|  | 930 | start_mm = &init_mm; | 
|  | 931 | atomic_inc(&init_mm.mm_users); | 
|  | 932 |  | 
|  | 933 | /* | 
|  | 934 | * Keep on scanning until all entries have gone.  Usually, | 
|  | 935 | * one pass through swap_map is enough, but not necessarily: | 
|  | 936 | * there are races when an instance of an entry might be missed. | 
|  | 937 | */ | 
|  | 938 | while ((i = find_next_to_unuse(si, i)) != 0) { | 
|  | 939 | if (signal_pending(current)) { | 
|  | 940 | retval = -EINTR; | 
|  | 941 | break; | 
|  | 942 | } | 
|  | 943 |  | 
| Hugh Dickins | 886bb7e | 2009-01-06 14:39:48 -0800 | [diff] [blame] | 944 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 945 | * Get a page for the entry, using the existing swap | 
|  | 946 | * cache page if there is one.  Otherwise, get a clean | 
| Hugh Dickins | 886bb7e | 2009-01-06 14:39:48 -0800 | [diff] [blame] | 947 | * page and read the swap into it. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 948 | */ | 
|  | 949 | swap_map = &si->swap_map[i]; | 
|  | 950 | entry = swp_entry(type, i); | 
| Hugh Dickins | 02098fe | 2008-02-04 22:28:42 -0800 | [diff] [blame] | 951 | page = read_swap_cache_async(entry, | 
|  | 952 | GFP_HIGHUSER_MOVABLE, NULL, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 953 | if (!page) { | 
|  | 954 | /* | 
|  | 955 | * Either swap_duplicate() failed because entry | 
|  | 956 | * has been freed independently, and will not be | 
|  | 957 | * reused since sys_swapoff() already disabled | 
|  | 958 | * allocation from here, or alloc_page() failed. | 
|  | 959 | */ | 
|  | 960 | if (!*swap_map) | 
|  | 961 | continue; | 
|  | 962 | retval = -ENOMEM; | 
|  | 963 | break; | 
|  | 964 | } | 
|  | 965 |  | 
|  | 966 | /* | 
|  | 967 | * Don't hold on to start_mm if it looks like exiting. | 
|  | 968 | */ | 
|  | 969 | if (atomic_read(&start_mm->mm_users) == 1) { | 
|  | 970 | mmput(start_mm); | 
|  | 971 | start_mm = &init_mm; | 
|  | 972 | atomic_inc(&init_mm.mm_users); | 
|  | 973 | } | 
|  | 974 |  | 
|  | 975 | /* | 
|  | 976 | * Wait for and lock page.  When do_swap_page races with | 
|  | 977 | * try_to_unuse, do_swap_page can handle the fault much | 
|  | 978 | * faster than try_to_unuse can locate the entry.  This | 
|  | 979 | * apparently redundant "wait_on_page_locked" lets try_to_unuse | 
|  | 980 | * defer to do_swap_page in such a case - in some tests, | 
|  | 981 | * do_swap_page and try_to_unuse repeatedly compete. | 
|  | 982 | */ | 
|  | 983 | wait_on_page_locked(page); | 
|  | 984 | wait_on_page_writeback(page); | 
|  | 985 | lock_page(page); | 
|  | 986 | wait_on_page_writeback(page); | 
|  | 987 |  | 
|  | 988 | /* | 
|  | 989 | * Remove all references to entry. | 
|  | 990 | * Whenever we reach init_mm, there's no address space | 
|  | 991 | * to search, but use it as a reminder to search shmem. | 
|  | 992 | */ | 
|  | 993 | shmem = 0; | 
|  | 994 | swcount = *swap_map; | 
|  | 995 | if (swcount > 1) { | 
|  | 996 | if (start_mm == &init_mm) | 
|  | 997 | shmem = shmem_unuse(entry, page); | 
|  | 998 | else | 
|  | 999 | retval = unuse_mm(start_mm, entry, page); | 
|  | 1000 | } | 
|  | 1001 | if (*swap_map > 1) { | 
|  | 1002 | int set_start_mm = (*swap_map >= swcount); | 
|  | 1003 | struct list_head *p = &start_mm->mmlist; | 
|  | 1004 | struct mm_struct *new_start_mm = start_mm; | 
|  | 1005 | struct mm_struct *prev_mm = start_mm; | 
|  | 1006 | struct mm_struct *mm; | 
|  | 1007 |  | 
|  | 1008 | atomic_inc(&new_start_mm->mm_users); | 
|  | 1009 | atomic_inc(&prev_mm->mm_users); | 
|  | 1010 | spin_lock(&mmlist_lock); | 
| Hugh Dickins | 2e0e26c | 2008-02-04 22:28:53 -0800 | [diff] [blame] | 1011 | while (*swap_map > 1 && !retval && !shmem && | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1012 | (p = p->next) != &start_mm->mmlist) { | 
|  | 1013 | mm = list_entry(p, struct mm_struct, mmlist); | 
| Hugh Dickins | 70af7c5 | 2006-06-23 02:03:44 -0700 | [diff] [blame] | 1014 | if (!atomic_inc_not_zero(&mm->mm_users)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1015 | continue; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1016 | spin_unlock(&mmlist_lock); | 
|  | 1017 | mmput(prev_mm); | 
|  | 1018 | prev_mm = mm; | 
|  | 1019 |  | 
|  | 1020 | cond_resched(); | 
|  | 1021 |  | 
|  | 1022 | swcount = *swap_map; | 
|  | 1023 | if (swcount <= 1) | 
|  | 1024 | ; | 
|  | 1025 | else if (mm == &init_mm) { | 
|  | 1026 | set_start_mm = 1; | 
|  | 1027 | shmem = shmem_unuse(entry, page); | 
|  | 1028 | } else | 
|  | 1029 | retval = unuse_mm(mm, entry, page); | 
|  | 1030 | if (set_start_mm && *swap_map < swcount) { | 
|  | 1031 | mmput(new_start_mm); | 
|  | 1032 | atomic_inc(&mm->mm_users); | 
|  | 1033 | new_start_mm = mm; | 
|  | 1034 | set_start_mm = 0; | 
|  | 1035 | } | 
|  | 1036 | spin_lock(&mmlist_lock); | 
|  | 1037 | } | 
|  | 1038 | spin_unlock(&mmlist_lock); | 
|  | 1039 | mmput(prev_mm); | 
|  | 1040 | mmput(start_mm); | 
|  | 1041 | start_mm = new_start_mm; | 
|  | 1042 | } | 
| Hugh Dickins | 2e0e26c | 2008-02-04 22:28:53 -0800 | [diff] [blame] | 1043 | if (shmem) { | 
|  | 1044 | /* page has already been unlocked and released */ | 
|  | 1045 | if (shmem > 0) | 
|  | 1046 | continue; | 
|  | 1047 | retval = shmem; | 
|  | 1048 | break; | 
|  | 1049 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1050 | if (retval) { | 
|  | 1051 | unlock_page(page); | 
|  | 1052 | page_cache_release(page); | 
|  | 1053 | break; | 
|  | 1054 | } | 
|  | 1055 |  | 
|  | 1056 | /* | 
|  | 1057 | * How could swap count reach 0x7fff when the maximum | 
|  | 1058 | * pid is 0x7fff, and there's no way to repeat a swap | 
|  | 1059 | * page within an mm (except in shmem, where it's the | 
|  | 1060 | * shared object which takes the reference count)? | 
|  | 1061 | * We believe SWAP_MAP_MAX cannot occur in Linux 2.4. | 
|  | 1062 | * | 
|  | 1063 | * If that's wrong, then we should worry more about | 
|  | 1064 | * exit_mmap() and do_munmap() cases described above: | 
|  | 1065 | * we might be resetting SWAP_MAP_MAX too early here. | 
|  | 1066 | * We know "Undead"s can happen, they're okay, so don't | 
|  | 1067 | * report them; but do report if we reset SWAP_MAP_MAX. | 
|  | 1068 | */ | 
|  | 1069 | if (*swap_map == SWAP_MAP_MAX) { | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1070 | spin_lock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1071 | *swap_map = 1; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1072 | spin_unlock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1073 | reset_overflow = 1; | 
|  | 1074 | } | 
|  | 1075 |  | 
|  | 1076 | /* | 
|  | 1077 | * If a reference remains (rare), we would like to leave | 
|  | 1078 | * the page in the swap cache; but try_to_unmap could | 
|  | 1079 | * then re-duplicate the entry once we drop page lock, | 
|  | 1080 | * so we might loop indefinitely; also, that page could | 
|  | 1081 | * not be swapped out to other storage meanwhile.  So: | 
|  | 1082 | * delete from cache even if there's another reference, | 
|  | 1083 | * after ensuring that the data has been saved to disk - | 
|  | 1084 | * since if the reference remains (rarer), it will be | 
|  | 1085 | * read from disk into another page.  Splitting into two | 
|  | 1086 | * pages would be incorrect if swap supported "shared | 
|  | 1087 | * private" pages, but they are handled by tmpfs files. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1088 | */ | 
|  | 1089 | if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) { | 
|  | 1090 | struct writeback_control wbc = { | 
|  | 1091 | .sync_mode = WB_SYNC_NONE, | 
|  | 1092 | }; | 
|  | 1093 |  | 
|  | 1094 | swap_writepage(page, &wbc); | 
|  | 1095 | lock_page(page); | 
|  | 1096 | wait_on_page_writeback(page); | 
|  | 1097 | } | 
| Hugh Dickins | 68bdc8d | 2009-01-06 14:39:37 -0800 | [diff] [blame] | 1098 |  | 
|  | 1099 | /* | 
|  | 1100 | * It is conceivable that a racing task removed this page from | 
|  | 1101 | * swap cache just before we acquired the page lock at the top, | 
|  | 1102 | * or while we dropped it in unuse_mm().  The page might even | 
|  | 1103 | * be back in swap cache on another swap area: that we must not | 
|  | 1104 | * delete, since it may not have been written out to swap yet. | 
|  | 1105 | */ | 
|  | 1106 | if (PageSwapCache(page) && | 
|  | 1107 | likely(page_private(page) == entry.val)) | 
| Hugh Dickins | 2e0e26c | 2008-02-04 22:28:53 -0800 | [diff] [blame] | 1108 | delete_from_swap_cache(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1109 |  | 
|  | 1110 | /* | 
|  | 1111 | * So that we could skip searching mms once the swap count went | 
|  | 1112 | * to 1, we did not mark any present ptes as dirty: we must | 
| Anderson Briglia | 2706a1b | 2007-07-15 23:38:09 -0700 | [diff] [blame] | 1113 | * mark the page dirty so shrink_page_list will preserve it. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1114 | */ | 
|  | 1115 | SetPageDirty(page); | 
|  | 1116 | unlock_page(page); | 
|  | 1117 | page_cache_release(page); | 
|  | 1118 |  | 
|  | 1119 | /* | 
|  | 1120 | * Make sure that we aren't completely killing | 
|  | 1121 | * interactive performance. | 
|  | 1122 | */ | 
|  | 1123 | cond_resched(); | 
|  | 1124 | } | 
|  | 1125 |  | 
|  | 1126 | mmput(start_mm); | 
|  | 1127 | if (reset_overflow) { | 
|  | 1128 | printk(KERN_WARNING "swapoff: cleared swap entry overflow\n"); | 
|  | 1129 | swap_overflow = 0; | 
|  | 1130 | } | 
|  | 1131 | return retval; | 
|  | 1132 | } | 
|  | 1133 |  | 
|  | 1134 | /* | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1135 | * After a successful try_to_unuse, if no swap is now in use, we know | 
|  | 1136 | * we can empty the mmlist.  swap_lock must be held on entry and exit. | 
|  | 1137 | * Note that mmlist_lock nests inside swap_lock, and an mm must be | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1138 | * added to the mmlist just after page_duplicate - before would be racy. | 
|  | 1139 | */ | 
|  | 1140 | static void drain_mmlist(void) | 
|  | 1141 | { | 
|  | 1142 | struct list_head *p, *next; | 
|  | 1143 | unsigned int i; | 
|  | 1144 |  | 
|  | 1145 | for (i = 0; i < nr_swapfiles; i++) | 
|  | 1146 | if (swap_info[i].inuse_pages) | 
|  | 1147 | return; | 
|  | 1148 | spin_lock(&mmlist_lock); | 
|  | 1149 | list_for_each_safe(p, next, &init_mm.mmlist) | 
|  | 1150 | list_del_init(p); | 
|  | 1151 | spin_unlock(&mmlist_lock); | 
|  | 1152 | } | 
|  | 1153 |  | 
|  | 1154 | /* | 
|  | 1155 | * Use this swapdev's extent info to locate the (PAGE_SIZE) block which | 
|  | 1156 | * corresponds to page offset `offset'. | 
|  | 1157 | */ | 
|  | 1158 | sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset) | 
|  | 1159 | { | 
|  | 1160 | struct swap_extent *se = sis->curr_swap_extent; | 
|  | 1161 | struct swap_extent *start_se = se; | 
|  | 1162 |  | 
|  | 1163 | for ( ; ; ) { | 
|  | 1164 | struct list_head *lh; | 
|  | 1165 |  | 
|  | 1166 | if (se->start_page <= offset && | 
|  | 1167 | offset < (se->start_page + se->nr_pages)) { | 
|  | 1168 | return se->start_block + (offset - se->start_page); | 
|  | 1169 | } | 
| Hugh Dickins | 11d3188 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1170 | lh = se->list.next; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1171 | if (lh == &sis->extent_list) | 
| Hugh Dickins | 11d3188 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1172 | lh = lh->next; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1173 | se = list_entry(lh, struct swap_extent, list); | 
|  | 1174 | sis->curr_swap_extent = se; | 
|  | 1175 | BUG_ON(se == start_se);		/* It *must* be present */ | 
|  | 1176 | } | 
|  | 1177 | } | 
|  | 1178 |  | 
| Rafael J. Wysocki | b0cb1a1 | 2007-07-29 23:24:36 +0200 | [diff] [blame] | 1179 | #ifdef CONFIG_HIBERNATION | 
| Rafael J. Wysocki | 3aef83e | 2006-12-06 20:34:10 -0800 | [diff] [blame] | 1180 | /* | 
|  | 1181 | * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev | 
|  | 1182 | * corresponding to given index in swap_info (swap type). | 
|  | 1183 | */ | 
|  | 1184 | sector_t swapdev_block(int swap_type, pgoff_t offset) | 
|  | 1185 | { | 
|  | 1186 | struct swap_info_struct *sis; | 
|  | 1187 |  | 
|  | 1188 | if (swap_type >= nr_swapfiles) | 
|  | 1189 | return 0; | 
|  | 1190 |  | 
|  | 1191 | sis = swap_info + swap_type; | 
|  | 1192 | return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0; | 
|  | 1193 | } | 
| Rafael J. Wysocki | b0cb1a1 | 2007-07-29 23:24:36 +0200 | [diff] [blame] | 1194 | #endif /* CONFIG_HIBERNATION */ | 
| Rafael J. Wysocki | 3aef83e | 2006-12-06 20:34:10 -0800 | [diff] [blame] | 1195 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1196 | /* | 
|  | 1197 | * Free all of a swapdev's extent information | 
|  | 1198 | */ | 
|  | 1199 | static void destroy_swap_extents(struct swap_info_struct *sis) | 
|  | 1200 | { | 
|  | 1201 | while (!list_empty(&sis->extent_list)) { | 
|  | 1202 | struct swap_extent *se; | 
|  | 1203 |  | 
|  | 1204 | se = list_entry(sis->extent_list.next, | 
|  | 1205 | struct swap_extent, list); | 
|  | 1206 | list_del(&se->list); | 
|  | 1207 | kfree(se); | 
|  | 1208 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1209 | } | 
|  | 1210 |  | 
|  | 1211 | /* | 
|  | 1212 | * Add a block range (and the corresponding page range) into this swapdev's | 
| Hugh Dickins | 11d3188 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1213 | * extent list.  The extent list is kept sorted in page order. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1214 | * | 
| Hugh Dickins | 11d3188 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1215 | * This function rather assumes that it is called in ascending page order. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1216 | */ | 
|  | 1217 | static int | 
|  | 1218 | add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, | 
|  | 1219 | unsigned long nr_pages, sector_t start_block) | 
|  | 1220 | { | 
|  | 1221 | struct swap_extent *se; | 
|  | 1222 | struct swap_extent *new_se; | 
|  | 1223 | struct list_head *lh; | 
|  | 1224 |  | 
| Hugh Dickins | 11d3188 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1225 | lh = sis->extent_list.prev;	/* The highest page extent */ | 
|  | 1226 | if (lh != &sis->extent_list) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1227 | se = list_entry(lh, struct swap_extent, list); | 
| Hugh Dickins | 11d3188 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1228 | BUG_ON(se->start_page + se->nr_pages != start_page); | 
|  | 1229 | if (se->start_block + se->nr_pages == start_block) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 | /* Merge it */ | 
|  | 1231 | se->nr_pages += nr_pages; | 
|  | 1232 | return 0; | 
|  | 1233 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1234 | } | 
|  | 1235 |  | 
|  | 1236 | /* | 
|  | 1237 | * No merge.  Insert a new extent, preserving ordering. | 
|  | 1238 | */ | 
|  | 1239 | new_se = kmalloc(sizeof(*se), GFP_KERNEL); | 
|  | 1240 | if (new_se == NULL) | 
|  | 1241 | return -ENOMEM; | 
|  | 1242 | new_se->start_page = start_page; | 
|  | 1243 | new_se->nr_pages = nr_pages; | 
|  | 1244 | new_se->start_block = start_block; | 
|  | 1245 |  | 
| Hugh Dickins | 11d3188 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1246 | list_add_tail(&new_se->list, &sis->extent_list); | 
| Hugh Dickins | 53092a7 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1247 | return 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1248 | } | 
|  | 1249 |  | 
|  | 1250 | /* | 
|  | 1251 | * A `swap extent' is a simple thing which maps a contiguous range of pages | 
|  | 1252 | * onto a contiguous range of disk blocks.  An ordered list of swap extents | 
|  | 1253 | * is built at swapon time and is then used at swap_writepage/swap_readpage | 
|  | 1254 | * time for locating where on disk a page belongs. | 
|  | 1255 | * | 
|  | 1256 | * If the swapfile is an S_ISBLK block device, a single extent is installed. | 
|  | 1257 | * This is done so that the main operating code can treat S_ISBLK and S_ISREG | 
|  | 1258 | * swap files identically. | 
|  | 1259 | * | 
|  | 1260 | * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap | 
|  | 1261 | * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK | 
|  | 1262 | * swapfiles are handled *identically* after swapon time. | 
|  | 1263 | * | 
|  | 1264 | * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks | 
|  | 1265 | * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If | 
|  | 1266 | * some stray blocks are found which do not fall within the PAGE_SIZE alignment | 
|  | 1267 | * requirements, they are simply tossed out - we will never use those blocks | 
|  | 1268 | * for swapping. | 
|  | 1269 | * | 
| Hugh Dickins | b0d9bcd | 2005-09-03 15:54:31 -0700 | [diff] [blame] | 1270 | * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon.  This | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1271 | * prevents root from shooting her foot off by ftruncating an in-use swapfile, | 
|  | 1272 | * which will scribble on the fs. | 
|  | 1273 | * | 
|  | 1274 | * The amount of disk space which a single swap extent represents varies. | 
|  | 1275 | * Typically it is in the 1-4 megabyte range.  So we can have hundreds of | 
|  | 1276 | * extents in the list.  To avoid much list walking, we cache the previous | 
|  | 1277 | * search location in `curr_swap_extent', and start new searches from there. | 
|  | 1278 | * This is extremely effective.  The average number of iterations in | 
|  | 1279 | * map_swap_page() has been measured at about 0.3 per page.  - akpm. | 
|  | 1280 | */ | 
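|  |  | /* | 
|  |  | * A quick worked example of the extent mapping described above, using | 
|  |  | * made-up numbers rather than any real swapfile: given an extent of | 
|  |  | * {start_page = 100, nr_pages = 50, start_block = 2000}, page offset | 
|  |  | * 120 falls inside it, and the lookup in map_swap_page() above returns | 
|  |  | * block 2000 + (120 - 100) = 2020. | 
|  |  | */ | 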
| Hugh Dickins | 53092a7 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1281 | static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1282 | { | 
|  | 1283 | struct inode *inode; | 
|  | 1284 | unsigned blocks_per_page; | 
|  | 1285 | unsigned long page_no; | 
|  | 1286 | unsigned blkbits; | 
|  | 1287 | sector_t probe_block; | 
|  | 1288 | sector_t last_block; | 
| Hugh Dickins | 53092a7 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1289 | sector_t lowest_block = -1; | 
|  | 1290 | sector_t highest_block = 0; | 
|  | 1291 | int nr_extents = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1292 | int ret; | 
|  | 1293 |  | 
|  | 1294 | inode = sis->swap_file->f_mapping->host; | 
|  | 1295 | if (S_ISBLK(inode->i_mode)) { | 
|  | 1296 | ret = add_swap_extent(sis, 0, sis->max, 0); | 
| Hugh Dickins | 53092a7 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1297 | *span = sis->pages; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1298 | goto done; | 
|  | 1299 | } | 
|  | 1300 |  | 
|  | 1301 | blkbits = inode->i_blkbits; | 
|  | 1302 | blocks_per_page = PAGE_SIZE >> blkbits; | 
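|  |  | /* | 
|  |  | * For illustration (hypothetical sizes): with 4096-byte pages and a | 
|  |  | * filesystem block size of 1024 bytes (blkbits = 10), blocks_per_page | 
|  |  | * is 4096 >> 10 = 4, so the alignment test below requires the first | 
|  |  | * block of each page-sized run to be a multiple of 4. | 
|  |  | */ | 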
|  | 1303 |  | 
|  | 1304 | /* | 
|  | 1305 | * Map all the blocks into the extent list.  This code doesn't try | 
|  | 1306 | * to be very smart. | 
|  | 1307 | */ | 
|  | 1308 | probe_block = 0; | 
|  | 1309 | page_no = 0; | 
|  | 1310 | last_block = i_size_read(inode) >> blkbits; | 
|  | 1311 | while ((probe_block + blocks_per_page) <= last_block && | 
|  | 1312 | page_no < sis->max) { | 
|  | 1313 | unsigned block_in_page; | 
|  | 1314 | sector_t first_block; | 
|  | 1315 |  | 
|  | 1316 | first_block = bmap(inode, probe_block); | 
|  | 1317 | if (first_block == 0) | 
|  | 1318 | goto bad_bmap; | 
|  | 1319 |  | 
|  | 1320 | /* | 
|  | 1321 | * It must be PAGE_SIZE aligned on-disk | 
|  | 1322 | */ | 
|  | 1323 | if (first_block & (blocks_per_page - 1)) { | 
|  | 1324 | probe_block++; | 
|  | 1325 | goto reprobe; | 
|  | 1326 | } | 
|  | 1327 |  | 
|  | 1328 | for (block_in_page = 1; block_in_page < blocks_per_page; | 
|  | 1329 | block_in_page++) { | 
|  | 1330 | sector_t block; | 
|  | 1331 |  | 
|  | 1332 | block = bmap(inode, probe_block + block_in_page); | 
|  | 1333 | if (block == 0) | 
|  | 1334 | goto bad_bmap; | 
|  | 1335 | if (block != first_block + block_in_page) { | 
|  | 1336 | /* Discontiguity */ | 
|  | 1337 | probe_block++; | 
|  | 1338 | goto reprobe; | 
|  | 1339 | } | 
|  | 1340 | } | 
|  | 1341 |  | 
| Hugh Dickins | 53092a7 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1342 | first_block >>= (PAGE_SHIFT - blkbits); | 
|  | 1343 | if (page_no) {	/* exclude the header page */ | 
|  | 1344 | if (first_block < lowest_block) | 
|  | 1345 | lowest_block = first_block; | 
|  | 1346 | if (first_block > highest_block) | 
|  | 1347 | highest_block = first_block; | 
|  | 1348 | } | 
|  | 1349 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1350 | /* | 
|  | 1351 | * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks | 
|  | 1352 | */ | 
| Hugh Dickins | 53092a7 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1353 | ret = add_swap_extent(sis, page_no, 1, first_block); | 
|  | 1354 | if (ret < 0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1355 | goto out; | 
| Hugh Dickins | 53092a7 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1356 | nr_extents += ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1357 | page_no++; | 
|  | 1358 | probe_block += blocks_per_page; | 
|  | 1359 | reprobe: | 
|  | 1360 | continue; | 
|  | 1361 | } | 
| Hugh Dickins | 53092a7 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1362 | ret = nr_extents; | 
|  | 1363 | *span = 1 + highest_block - lowest_block; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1364 | if (page_no == 0) | 
| Hugh Dickins | e2244ec | 2005-09-03 15:54:32 -0700 | [diff] [blame] | 1365 | page_no = 1;	/* force Empty message */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1366 | sis->max = page_no; | 
| Hugh Dickins | e2244ec | 2005-09-03 15:54:32 -0700 | [diff] [blame] | 1367 | sis->pages = page_no - 1; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1368 | sis->highest_bit = page_no - 1; | 
|  | 1369 | done: | 
|  | 1370 | sis->curr_swap_extent = list_entry(sis->extent_list.prev, | 
|  | 1371 | struct swap_extent, list); | 
|  | 1372 | goto out; | 
|  | 1373 | bad_bmap: | 
|  | 1374 | printk(KERN_ERR "swapon: swapfile has holes\n"); | 
|  | 1375 | ret = -EINVAL; | 
|  | 1376 | out: | 
|  | 1377 | return ret; | 
|  | 1378 | } | 
|  | 1379 |  | 
| Heiko Carstens | c4ea37c | 2009-01-14 14:14:28 +0100 | [diff] [blame] | 1380 | SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1381 | { | 
|  | 1382 | struct swap_info_struct * p = NULL; | 
|  | 1383 | unsigned short *swap_map; | 
|  | 1384 | struct file *swap_file, *victim; | 
|  | 1385 | struct address_space *mapping; | 
|  | 1386 | struct inode *inode; | 
|  | 1387 | char * pathname; | 
|  | 1388 | int i, type, prev; | 
|  | 1389 | int err; | 
| Hugh Dickins | 886bb7e | 2009-01-06 14:39:48 -0800 | [diff] [blame] | 1390 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1391 | if (!capable(CAP_SYS_ADMIN)) | 
|  | 1392 | return -EPERM; | 
|  | 1393 |  | 
|  | 1394 | pathname = getname(specialfile); | 
|  | 1395 | err = PTR_ERR(pathname); | 
|  | 1396 | if (IS_ERR(pathname)) | 
|  | 1397 | goto out; | 
|  | 1398 |  | 
|  | 1399 | victim = filp_open(pathname, O_RDWR|O_LARGEFILE, 0); | 
|  | 1400 | putname(pathname); | 
|  | 1401 | err = PTR_ERR(victim); | 
|  | 1402 | if (IS_ERR(victim)) | 
|  | 1403 | goto out; | 
|  | 1404 |  | 
|  | 1405 | mapping = victim->f_mapping; | 
|  | 1406 | prev = -1; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1407 | spin_lock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1408 | for (type = swap_list.head; type >= 0; type = swap_info[type].next) { | 
|  | 1409 | p = swap_info + type; | 
| Hugh Dickins | 22c6f8f | 2009-01-06 14:39:48 -0800 | [diff] [blame] | 1410 | if (p->flags & SWP_WRITEOK) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1411 | if (p->swap_file->f_mapping == mapping) | 
|  | 1412 | break; | 
|  | 1413 | } | 
|  | 1414 | prev = type; | 
|  | 1415 | } | 
|  | 1416 | if (type < 0) { | 
|  | 1417 | err = -EINVAL; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1418 | spin_unlock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1419 | goto out_dput; | 
|  | 1420 | } | 
|  | 1421 | if (!security_vm_enough_memory(p->pages)) | 
|  | 1422 | vm_unacct_memory(p->pages); | 
|  | 1423 | else { | 
|  | 1424 | err = -ENOMEM; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1425 | spin_unlock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1426 | goto out_dput; | 
|  | 1427 | } | 
|  | 1428 | if (prev < 0) { | 
|  | 1429 | swap_list.head = p->next; | 
|  | 1430 | } else { | 
|  | 1431 | swap_info[prev].next = p->next; | 
|  | 1432 | } | 
|  | 1433 | if (type == swap_list.next) { | 
|  | 1434 | /* just pick something that's safe... */ | 
|  | 1435 | swap_list.next = swap_list.head; | 
|  | 1436 | } | 
| Hugh Dickins | 78ecba0 | 2008-07-23 21:28:23 -0700 | [diff] [blame] | 1437 | if (p->prio < 0) { | 
|  | 1438 | for (i = p->next; i >= 0; i = swap_info[i].next) | 
|  | 1439 | swap_info[i].prio = p->prio--; | 
|  | 1440 | least_priority++; | 
|  | 1441 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1442 | nr_swap_pages -= p->pages; | 
|  | 1443 | total_swap_pages -= p->pages; | 
|  | 1444 | p->flags &= ~SWP_WRITEOK; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1445 | spin_unlock(&swap_lock); | 
| Hugh Dickins | fb4f88d | 2005-09-03 15:54:37 -0700 | [diff] [blame] | 1446 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1447 | current->flags |= PF_SWAPOFF; | 
|  | 1448 | err = try_to_unuse(type); | 
|  | 1449 | current->flags &= ~PF_SWAPOFF; | 
|  | 1450 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1451 | if (err) { | 
|  | 1452 | /* re-insert swap space back into swap_list */ | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1453 | spin_lock(&swap_lock); | 
| Hugh Dickins | 78ecba0 | 2008-07-23 21:28:23 -0700 | [diff] [blame] | 1454 | if (p->prio < 0) | 
|  | 1455 | p->prio = --least_priority; | 
|  | 1456 | prev = -1; | 
|  | 1457 | for (i = swap_list.head; i >= 0; i = swap_info[i].next) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1458 | if (p->prio >= swap_info[i].prio) | 
|  | 1459 | break; | 
| Hugh Dickins | 78ecba0 | 2008-07-23 21:28:23 -0700 | [diff] [blame] | 1460 | prev = i; | 
|  | 1461 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1462 | p->next = i; | 
|  | 1463 | if (prev < 0) | 
|  | 1464 | swap_list.head = swap_list.next = p - swap_info; | 
|  | 1465 | else | 
|  | 1466 | swap_info[prev].next = p - swap_info; | 
|  | 1467 | nr_swap_pages += p->pages; | 
|  | 1468 | total_swap_pages += p->pages; | 
|  | 1469 | p->flags |= SWP_WRITEOK; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1470 | spin_unlock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1471 | goto out_dput; | 
|  | 1472 | } | 
| Hugh Dickins | 52b7efdb | 2005-09-03 15:54:39 -0700 | [diff] [blame] | 1473 |  | 
|  | 1474 | /* wait for any unplug function to finish */ | 
|  | 1475 | down_write(&swap_unplug_sem); | 
|  | 1476 | up_write(&swap_unplug_sem); | 
|  | 1477 |  | 
| Hugh Dickins | 4cd3bb1 | 2005-09-03 15:54:33 -0700 | [diff] [blame] | 1478 | destroy_swap_extents(p); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1479 | mutex_lock(&swapon_mutex); | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1480 | spin_lock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1481 | drain_mmlist(); | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1482 |  | 
|  | 1483 | /* wait for anyone still in scan_swap_map */ | 
|  | 1484 | p->highest_bit = 0;		/* cuts scans short */ | 
|  | 1485 | while (p->flags >= SWP_SCANNING) { | 
|  | 1486 | spin_unlock(&swap_lock); | 
| Nishanth Aravamudan | 13e4b57 | 2005-09-10 00:27:25 -0700 | [diff] [blame] | 1487 | schedule_timeout_uninterruptible(1); | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1488 | spin_lock(&swap_lock); | 
|  | 1489 | } | 
|  | 1490 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1491 | swap_file = p->swap_file; | 
|  | 1492 | p->swap_file = NULL; | 
|  | 1493 | p->max = 0; | 
|  | 1494 | swap_map = p->swap_map; | 
|  | 1495 | p->swap_map = NULL; | 
|  | 1496 | p->flags = 0; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1497 | spin_unlock(&swap_lock); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1498 | mutex_unlock(&swapon_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1499 | vfree(swap_map); | 
| KAMEZAWA Hiroyuki | 27a7faa | 2009-01-07 18:07:58 -0800 | [diff] [blame] | 1500 | /* Destroy swap account information */ | 
|  | 1501 | swap_cgroup_swapoff(type); | 
|  | 1502 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1503 | inode = mapping->host; | 
|  | 1504 | if (S_ISBLK(inode->i_mode)) { | 
|  | 1505 | struct block_device *bdev = I_BDEV(inode); | 
|  | 1506 | set_blocksize(bdev, p->old_block_size); | 
|  | 1507 | bd_release(bdev); | 
|  | 1508 | } else { | 
| Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 1509 | mutex_lock(&inode->i_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1510 | inode->i_flags &= ~S_SWAPFILE; | 
| Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 1511 | mutex_unlock(&inode->i_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1512 | } | 
|  | 1513 | filp_close(swap_file, NULL); | 
|  | 1514 | err = 0; | 
|  | 1515 |  | 
|  | 1516 | out_dput: | 
|  | 1517 | filp_close(victim, NULL); | 
|  | 1518 | out: | 
|  | 1519 | return err; | 
|  | 1520 | } | 
|  | 1521 |  | 
|  | 1522 | #ifdef CONFIG_PROC_FS | 
|  | 1523 | /* iterator */ | 
|  | 1524 | static void *swap_start(struct seq_file *swap, loff_t *pos) | 
|  | 1525 | { | 
|  | 1526 | struct swap_info_struct *ptr = swap_info; | 
|  | 1527 | int i; | 
|  | 1528 | loff_t l = *pos; | 
|  | 1529 |  | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1530 | mutex_lock(&swapon_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1531 |  | 
| Suleiman Souhlal | 881e4aa | 2006-12-06 20:32:28 -0800 | [diff] [blame] | 1532 | if (!l) | 
|  | 1533 | return SEQ_START_TOKEN; | 
|  | 1534 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1535 | for (i = 0; i < nr_swapfiles; i++, ptr++) { | 
|  | 1536 | if (!(ptr->flags & SWP_USED) || !ptr->swap_map) | 
|  | 1537 | continue; | 
| Suleiman Souhlal | 881e4aa | 2006-12-06 20:32:28 -0800 | [diff] [blame] | 1538 | if (!--l) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1539 | return ptr; | 
|  | 1540 | } | 
|  | 1541 |  | 
|  | 1542 | return NULL; | 
|  | 1543 | } | 
|  | 1544 |  | 
|  | 1545 | static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) | 
|  | 1546 | { | 
| Suleiman Souhlal | 881e4aa | 2006-12-06 20:32:28 -0800 | [diff] [blame] | 1547 | struct swap_info_struct *ptr; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1548 | struct swap_info_struct *endptr = swap_info + nr_swapfiles; | 
|  | 1549 |  | 
| Suleiman Souhlal | 881e4aa | 2006-12-06 20:32:28 -0800 | [diff] [blame] | 1550 | if (v == SEQ_START_TOKEN) | 
|  | 1551 | ptr = swap_info; | 
|  | 1552 | else { | 
|  | 1553 | ptr = v; | 
|  | 1554 | ptr++; | 
|  | 1555 | } | 
|  | 1556 |  | 
|  | 1557 | for (; ptr < endptr; ptr++) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1558 | if (!(ptr->flags & SWP_USED) || !ptr->swap_map) | 
|  | 1559 | continue; | 
|  | 1560 | ++*pos; | 
|  | 1561 | return ptr; | 
|  | 1562 | } | 
|  | 1563 |  | 
|  | 1564 | return NULL; | 
|  | 1565 | } | 
|  | 1566 |  | 
|  | 1567 | static void swap_stop(struct seq_file *swap, void *v) | 
|  | 1568 | { | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1569 | mutex_unlock(&swapon_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1570 | } | 
|  | 1571 |  | 
|  | 1572 | static int swap_show(struct seq_file *swap, void *v) | 
|  | 1573 | { | 
|  | 1574 | struct swap_info_struct *ptr = v; | 
|  | 1575 | struct file *file; | 
|  | 1576 | int len; | 
|  | 1577 |  | 
| Suleiman Souhlal | 881e4aa | 2006-12-06 20:32:28 -0800 | [diff] [blame] | 1578 | if (ptr == SEQ_START_TOKEN) { | 
|  | 1579 | seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); | 
|  | 1580 | return 0; | 
|  | 1581 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1582 |  | 
|  | 1583 | file = ptr->swap_file; | 
| Jan Blunck | c32c2f6 | 2008-02-14 19:38:43 -0800 | [diff] [blame] | 1584 | len = seq_path(swap, &file->f_path, " \t\n\\"); | 
| Hugh Dickins | 6eb396d | 2005-09-03 15:54:35 -0700 | [diff] [blame] | 1585 | seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", | 
| Hugh Dickins | 886bb7e | 2009-01-06 14:39:48 -0800 | [diff] [blame] | 1586 | len < 40 ? 40 - len : 1, " ", | 
|  | 1587 | S_ISBLK(file->f_path.dentry->d_inode->i_mode) ? | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1588 | "partition" : "file\t", | 
| Hugh Dickins | 886bb7e | 2009-01-06 14:39:48 -0800 | [diff] [blame] | 1589 | ptr->pages << (PAGE_SHIFT - 10), | 
|  | 1590 | ptr->inuse_pages << (PAGE_SHIFT - 10), | 
|  | 1591 | ptr->prio); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1592 | return 0; | 
|  | 1593 | } | 
|  | 1594 |  | 
| Helge Deller | 15ad7cd | 2006-12-06 20:40:36 -0800 | [diff] [blame] | 1595 | static const struct seq_operations swaps_op = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1596 | .start =	swap_start, | 
|  | 1597 | .next =		swap_next, | 
|  | 1598 | .stop =		swap_stop, | 
|  | 1599 | .show =		swap_show | 
|  | 1600 | }; | 
|  | 1601 |  | 
|  | 1602 | static int swaps_open(struct inode *inode, struct file *file) | 
|  | 1603 | { | 
|  | 1604 | return seq_open(file, &swaps_op); | 
|  | 1605 | } | 
|  | 1606 |  | 
| Helge Deller | 15ad7cd | 2006-12-06 20:40:36 -0800 | [diff] [blame] | 1607 | static const struct file_operations proc_swaps_operations = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1608 | .open		= swaps_open, | 
|  | 1609 | .read		= seq_read, | 
|  | 1610 | .llseek		= seq_lseek, | 
|  | 1611 | .release	= seq_release, | 
|  | 1612 | }; | 
|  | 1613 |  | 
|  | 1614 | static int __init procswaps_init(void) | 
|  | 1615 | { | 
| Denis V. Lunev | 3d71f86 | 2008-04-29 01:02:13 -0700 | [diff] [blame] | 1616 | proc_create("swaps", 0, NULL, &proc_swaps_operations); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1617 | return 0; | 
|  | 1618 | } | 
|  | 1619 | __initcall(procswaps_init); | 
|  | 1620 | #endif /* CONFIG_PROC_FS */ | 
|  | 1621 |  | 
| Jan Beulich | 1796316 | 2008-12-16 11:35:24 +0000 | [diff] [blame] | 1622 | #ifdef MAX_SWAPFILES_CHECK | 
|  | 1623 | static int __init max_swapfiles_check(void) | 
|  | 1624 | { | 
|  | 1625 | MAX_SWAPFILES_CHECK(); | 
|  | 1626 | return 0; | 
|  | 1627 | } | 
|  | 1628 | late_initcall(max_swapfiles_check); | 
|  | 1629 | #endif | 
|  | 1630 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1631 | /* | 
|  | 1632 | * Written 01/25/92 by Simmule Turner, heavily changed by Linus. | 
|  | 1633 | * | 
|  | 1634 | * The swapon system call | 
|  | 1635 | */ | 
| Heiko Carstens | c4ea37c | 2009-01-14 14:14:28 +0100 | [diff] [blame] | 1636 | SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1637 | { | 
|  | 1638 | struct swap_info_struct * p; | 
|  | 1639 | char *name = NULL; | 
|  | 1640 | struct block_device *bdev = NULL; | 
|  | 1641 | struct file *swap_file = NULL; | 
|  | 1642 | struct address_space *mapping; | 
|  | 1643 | unsigned int type; | 
|  | 1644 | int i, prev; | 
|  | 1645 | int error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1646 | union swap_header *swap_header = NULL; | 
| Hugh Dickins | 6eb396d | 2005-09-03 15:54:35 -0700 | [diff] [blame] | 1647 | unsigned int nr_good_pages = 0; | 
|  | 1648 | int nr_extents = 0; | 
| Hugh Dickins | 53092a7 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1649 | sector_t span; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1650 | unsigned long maxpages = 1; | 
| Hugh Dickins | 73fd874 | 2009-01-06 14:39:47 -0800 | [diff] [blame] | 1651 | unsigned long swapfilepages; | 
| Hugh Dickins | 78ecba0 | 2008-07-23 21:28:23 -0700 | [diff] [blame] | 1652 | unsigned short *swap_map = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1653 | struct page *page = NULL; | 
|  | 1654 | struct inode *inode = NULL; | 
|  | 1655 | int did_down = 0; | 
|  | 1656 |  | 
|  | 1657 | if (!capable(CAP_SYS_ADMIN)) | 
|  | 1658 | return -EPERM; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1659 | spin_lock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1660 | p = swap_info; | 
|  | 1661 | for (type = 0 ; type < nr_swapfiles ; type++,p++) | 
|  | 1662 | if (!(p->flags & SWP_USED)) | 
|  | 1663 | break; | 
|  | 1664 | error = -EPERM; | 
| Christoph Lameter | 0697212 | 2006-06-23 02:03:35 -0700 | [diff] [blame] | 1665 | if (type >= MAX_SWAPFILES) { | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1666 | spin_unlock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1667 | goto out; | 
|  | 1668 | } | 
|  | 1669 | if (type >= nr_swapfiles) | 
|  | 1670 | nr_swapfiles = type+1; | 
| Hugh Dickins | 78ecba0 | 2008-07-23 21:28:23 -0700 | [diff] [blame] | 1671 | memset(p, 0, sizeof(*p)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1672 | INIT_LIST_HEAD(&p->extent_list); | 
|  | 1673 | p->flags = SWP_USED; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1674 | p->next = -1; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1675 | spin_unlock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1676 | name = getname(specialfile); | 
|  | 1677 | error = PTR_ERR(name); | 
|  | 1678 | if (IS_ERR(name)) { | 
|  | 1679 | name = NULL; | 
|  | 1680 | goto bad_swap_2; | 
|  | 1681 | } | 
|  | 1682 | swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0); | 
|  | 1683 | error = PTR_ERR(swap_file); | 
|  | 1684 | if (IS_ERR(swap_file)) { | 
|  | 1685 | swap_file = NULL; | 
|  | 1686 | goto bad_swap_2; | 
|  | 1687 | } | 
|  | 1688 |  | 
|  | 1689 | p->swap_file = swap_file; | 
|  | 1690 | mapping = swap_file->f_mapping; | 
|  | 1691 | inode = mapping->host; | 
|  | 1692 |  | 
|  | 1693 | error = -EBUSY; | 
|  | 1694 | for (i = 0; i < nr_swapfiles; i++) { | 
|  | 1695 | struct swap_info_struct *q = &swap_info[i]; | 
|  | 1696 |  | 
|  | 1697 | if (i == type || !q->swap_file) | 
|  | 1698 | continue; | 
|  | 1699 | if (mapping == q->swap_file->f_mapping) | 
|  | 1700 | goto bad_swap; | 
|  | 1701 | } | 
|  | 1702 |  | 
|  | 1703 | error = -EINVAL; | 
|  | 1704 | if (S_ISBLK(inode->i_mode)) { | 
|  | 1705 | bdev = I_BDEV(inode); | 
|  | 1706 | error = bd_claim(bdev, sys_swapon); | 
|  | 1707 | if (error < 0) { | 
|  | 1708 | bdev = NULL; | 
| Rob Landley | f7b3a43 | 2005-09-22 21:44:27 -0700 | [diff] [blame] | 1709 | error = -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1710 | goto bad_swap; | 
|  | 1711 | } | 
|  | 1712 | p->old_block_size = block_size(bdev); | 
|  | 1713 | error = set_blocksize(bdev, PAGE_SIZE); | 
|  | 1714 | if (error < 0) | 
|  | 1715 | goto bad_swap; | 
|  | 1716 | p->bdev = bdev; | 
|  | 1717 | } else if (S_ISREG(inode->i_mode)) { | 
|  | 1718 | p->bdev = inode->i_sb->s_bdev; | 
| Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 1719 | mutex_lock(&inode->i_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1720 | did_down = 1; | 
|  | 1721 | if (IS_SWAPFILE(inode)) { | 
|  | 1722 | error = -EBUSY; | 
|  | 1723 | goto bad_swap; | 
|  | 1724 | } | 
|  | 1725 | } else { | 
|  | 1726 | goto bad_swap; | 
|  | 1727 | } | 
|  | 1728 |  | 
| Hugh Dickins | 73fd874 | 2009-01-06 14:39:47 -0800 | [diff] [blame] | 1729 | swapfilepages = i_size_read(inode) >> PAGE_SHIFT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1730 |  | 
|  | 1731 | /* | 
|  | 1732 | * Read the swap header. | 
|  | 1733 | */ | 
|  | 1734 | if (!mapping->a_ops->readpage) { | 
|  | 1735 | error = -EINVAL; | 
|  | 1736 | goto bad_swap; | 
|  | 1737 | } | 
| Pekka Enberg | 090d2b1 | 2006-06-23 02:05:08 -0700 | [diff] [blame] | 1738 | page = read_mapping_page(mapping, 0, swap_file); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1739 | if (IS_ERR(page)) { | 
|  | 1740 | error = PTR_ERR(page); | 
|  | 1741 | goto bad_swap; | 
|  | 1742 | } | 
| Hugh Dickins | 81e3397 | 2009-01-06 14:39:49 -0800 | [diff] [blame] | 1743 | swap_header = kmap(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1744 |  | 
| Hugh Dickins | 81e3397 | 2009-01-06 14:39:49 -0800 | [diff] [blame] | 1745 | if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) { | 
| Jesper Juhl | e97a311 | 2006-01-11 01:50:28 +0100 | [diff] [blame] | 1746 | printk(KERN_ERR "Unable to find swap-space signature\n"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1747 | error = -EINVAL; | 
|  | 1748 | goto bad_swap; | 
|  | 1749 | } | 
| Hugh Dickins | 886bb7e | 2009-01-06 14:39:48 -0800 | [diff] [blame] | 1750 |  | 
| Hugh Dickins | 81e3397 | 2009-01-06 14:39:49 -0800 | [diff] [blame] | 1751 | /* swap partition endianness hack... */ | 
|  | 1752 | if (swab32(swap_header->info.version) == 1) { | 
|  | 1753 | swab32s(&swap_header->info.version); | 
|  | 1754 | swab32s(&swap_header->info.last_page); | 
|  | 1755 | swab32s(&swap_header->info.nr_badpages); | 
|  | 1756 | for (i = 0; i < swap_header->info.nr_badpages; i++) | 
|  | 1757 | swab32s(&swap_header->info.badpages[i]); | 
|  | 1758 | } | 
|  | 1759 | /* Check the swap header's sub-version */ | 
|  | 1760 | if (swap_header->info.version != 1) { | 
|  | 1761 | printk(KERN_WARNING | 
|  | 1762 | "Unable to handle swap header version %d\n", | 
|  | 1763 | swap_header->info.version); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1764 | error = -EINVAL; | 
|  | 1765 | goto bad_swap; | 
| Hugh Dickins | 81e3397 | 2009-01-06 14:39:49 -0800 | [diff] [blame] | 1766 | } | 
|  | 1767 |  | 
|  | 1768 | p->lowest_bit  = 1; | 
|  | 1769 | p->cluster_next = 1; | 
|  | 1770 |  | 
|  | 1771 | /* | 
|  | 1772 | * Find out how many pages are allowed for a single swap | 
|  | 1773 | * device. There are two limiting factors: 1) the number of | 
|  | 1774 | * bits for the swap offset in the swp_entry_t type and | 
|  | 1775 | * 2) the number of bits in a swap pte as defined by | 
|  | 1776 | * the different architectures. In order to find the | 
|  | 1777 | * largest possible bit mask a swap entry with swap type 0 | 
|  | 1778 | * and swap offset ~0UL is created, encoded to a swap pte, | 
|  | 1779 | * decoded to a swp_entry_t again and finally the swap | 
|  | 1780 | * offset is extracted. This will mask all the bits from | 
|  | 1781 | * the initial ~0UL mask that can't be encoded in either | 
|  | 1782 | * the swp_entry_t or the architecture definition of a | 
|  | 1783 | * swap pte. | 
|  | 1784 | */ | 
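|  |  | /* | 
|  |  | * Worked example (the bit count is hypothetical and arch-dependent): | 
|  |  | * if a swap pte leaves 27 bits for the offset, the encode/decode | 
|  |  | * round-trip below turns ~0UL into 2^27 - 1, so maxpages becomes | 
|  |  | * 2^27 - 2, i.e. roughly 512GB of swap with 4KB pages. | 
|  |  | */ | 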
|  | 1785 | maxpages = swp_offset(pte_to_swp_entry( | 
|  | 1786 | swp_entry_to_pte(swp_entry(0, ~0UL)))) - 1; | 
|  | 1787 | if (maxpages > swap_header->info.last_page) | 
|  | 1788 | maxpages = swap_header->info.last_page; | 
|  | 1789 | p->highest_bit = maxpages - 1; | 
|  | 1790 |  | 
|  | 1791 | error = -EINVAL; | 
|  | 1792 | if (!maxpages) | 
|  | 1793 | goto bad_swap; | 
|  | 1794 | if (swapfilepages && maxpages > swapfilepages) { | 
|  | 1795 | printk(KERN_WARNING | 
|  | 1796 | "Swap area shorter than signature indicates\n"); | 
|  | 1797 | goto bad_swap; | 
|  | 1798 | } | 
|  | 1799 | if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) | 
|  | 1800 | goto bad_swap; | 
|  | 1801 | if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) | 
|  | 1802 | goto bad_swap; | 
|  | 1803 |  | 
|  | 1804 | /* OK, set up the swap map and apply the bad block list */ | 
|  | 1805 | swap_map = vmalloc(maxpages * sizeof(short)); | 
|  | 1806 | if (!swap_map) { | 
|  | 1807 | error = -ENOMEM; | 
|  | 1808 | goto bad_swap; | 
|  | 1809 | } | 
|  | 1810 |  | 
|  | 1811 | memset(swap_map, 0, maxpages * sizeof(short)); | 
|  | 1812 | for (i = 0; i < swap_header->info.nr_badpages; i++) { | 
|  | 1813 | int page_nr = swap_header->info.badpages[i]; | 
|  | 1814 | if (page_nr <= 0 || page_nr >= swap_header->info.last_page) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1815 | error = -EINVAL; | 
|  | 1816 | goto bad_swap; | 
|  | 1817 | } | 
| Hugh Dickins | 81e3397 | 2009-01-06 14:39:49 -0800 | [diff] [blame] | 1818 | swap_map[page_nr] = SWAP_MAP_BAD; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1819 | } | 
| KAMEZAWA Hiroyuki | 27a7faa | 2009-01-07 18:07:58 -0800 | [diff] [blame] | 1820 |  | 
|  | 1821 | error = swap_cgroup_swapon(type, maxpages); | 
|  | 1822 | if (error) | 
|  | 1823 | goto bad_swap; | 
|  | 1824 |  | 
| Hugh Dickins | 81e3397 | 2009-01-06 14:39:49 -0800 | [diff] [blame] | 1825 | nr_good_pages = swap_header->info.last_page - | 
|  | 1826 | swap_header->info.nr_badpages - | 
|  | 1827 | 1 /* header page */; | 
| Hugh Dickins | e2244ec | 2005-09-03 15:54:32 -0700 | [diff] [blame] | 1828 |  | 
| Hugh Dickins | e2244ec | 2005-09-03 15:54:32 -0700 | [diff] [blame] | 1829 | if (nr_good_pages) { | 
| Hugh Dickins | 78ecba0 | 2008-07-23 21:28:23 -0700 | [diff] [blame] | 1830 | swap_map[0] = SWAP_MAP_BAD; | 
| Hugh Dickins | e2244ec | 2005-09-03 15:54:32 -0700 | [diff] [blame] | 1831 | p->max = maxpages; | 
|  | 1832 | p->pages = nr_good_pages; | 
| Hugh Dickins | 53092a7 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1833 | nr_extents = setup_swap_extents(p, &span); | 
|  | 1834 | if (nr_extents < 0) { | 
|  | 1835 | error = nr_extents; | 
| Hugh Dickins | e2244ec | 2005-09-03 15:54:32 -0700 | [diff] [blame] | 1836 | goto bad_swap; | 
| Hugh Dickins | 53092a7 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1837 | } | 
| Hugh Dickins | e2244ec | 2005-09-03 15:54:32 -0700 | [diff] [blame] | 1838 | nr_good_pages = p->pages; | 
|  | 1839 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1840 | if (!nr_good_pages) { | 
|  | 1841 | printk(KERN_WARNING "Empty swap-file\n"); | 
|  | 1842 | error = -EINVAL; | 
|  | 1843 | goto bad_swap; | 
|  | 1844 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1845 |  | 
| Hugh Dickins | 20137a4 | 2009-01-06 14:39:54 -0800 | [diff] [blame] | 1846 | if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { | 
|  | 1847 | p->flags |= SWP_SOLIDSTATE; | 
| Hugh Dickins | 20137a4 | 2009-01-06 14:39:54 -0800 | [diff] [blame] | 1848 | p->cluster_next = 1 + (random32() % p->highest_bit); | 
|  | 1849 | } | 
| Hugh Dickins | 6a6ba83 | 2009-01-06 14:39:51 -0800 | [diff] [blame] | 1850 | if (discard_swap(p) == 0) | 
|  | 1851 | p->flags |= SWP_DISCARDABLE; | 
|  | 1852 |  | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1853 | mutex_lock(&swapon_mutex); | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1854 | spin_lock(&swap_lock); | 
| Hugh Dickins | 78ecba0 | 2008-07-23 21:28:23 -0700 | [diff] [blame] | 1855 | if (swap_flags & SWAP_FLAG_PREFER) | 
|  | 1856 | p->prio = | 
|  | 1857 | (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; | 
|  | 1858 | else | 
|  | 1859 | p->prio = --least_priority; | 
|  | 1860 | p->swap_map = swap_map; | 
| Hugh Dickins | 22c6f8f | 2009-01-06 14:39:48 -0800 | [diff] [blame] | 1861 | p->flags |= SWP_WRITEOK; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1862 | nr_swap_pages += nr_good_pages; | 
|  | 1863 | total_swap_pages += nr_good_pages; | 
| Hugh Dickins | 53092a7 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1864 |  | 
| Hugh Dickins | 6eb396d | 2005-09-03 15:54:35 -0700 | [diff] [blame] | 1865 | printk(KERN_INFO "Adding %uk swap on %s.  " | 
| Hugh Dickins | 20137a4 | 2009-01-06 14:39:54 -0800 | [diff] [blame] | 1866 | "Priority:%d extents:%d across:%lluk %s%s\n", | 
| Hugh Dickins | 53092a7 | 2005-09-03 15:54:34 -0700 | [diff] [blame] | 1867 | nr_good_pages<<(PAGE_SHIFT-10), name, p->prio, | 
| Hugh Dickins | 6a6ba83 | 2009-01-06 14:39:51 -0800 | [diff] [blame] | 1868 | nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), | 
| Hugh Dickins | 20137a4 | 2009-01-06 14:39:54 -0800 | [diff] [blame] | 1869 | (p->flags & SWP_SOLIDSTATE) ? "SS" : "", | 
|  | 1870 | (p->flags & SWP_DISCARDABLE) ? "D" : ""); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1871 |  | 
|  | 1872 | /* insert swap space into swap_list: */ | 
|  | 1873 | prev = -1; | 
|  | 1874 | for (i = swap_list.head; i >= 0; i = swap_info[i].next) { | 
|  | 1875 | if (p->prio >= swap_info[i].prio) { | 
|  | 1876 | break; | 
|  | 1877 | } | 
|  | 1878 | prev = i; | 
|  | 1879 | } | 
|  | 1880 | p->next = i; | 
|  | 1881 | if (prev < 0) { | 
|  | 1882 | swap_list.head = swap_list.next = p - swap_info; | 
|  | 1883 | } else { | 
|  | 1884 | swap_info[prev].next = p - swap_info; | 
|  | 1885 | } | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1886 | spin_unlock(&swap_lock); | 
| Ingo Molnar | fc0abb1 | 2006-01-18 17:42:33 -0800 | [diff] [blame] | 1887 | mutex_unlock(&swapon_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1888 | error = 0; | 
|  | 1889 | goto out; | 
|  | 1890 | bad_swap: | 
|  | 1891 | if (bdev) { | 
|  | 1892 | set_blocksize(bdev, p->old_block_size); | 
|  | 1893 | bd_release(bdev); | 
|  | 1894 | } | 
| Hugh Dickins | 4cd3bb1 | 2005-09-03 15:54:33 -0700 | [diff] [blame] | 1895 | destroy_swap_extents(p); | 
| KAMEZAWA Hiroyuki | 27a7faa | 2009-01-07 18:07:58 -0800 | [diff] [blame] | 1896 | swap_cgroup_swapoff(type); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1897 | bad_swap_2: | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1898 | spin_lock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1899 | p->swap_file = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1900 | p->flags = 0; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1901 | spin_unlock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1902 | vfree(swap_map); | 
|  | 1903 | if (swap_file) | 
|  | 1904 | filp_close(swap_file, NULL); | 
|  | 1905 | out: | 
|  | 1906 | if (page && !IS_ERR(page)) { | 
|  | 1907 | kunmap(page); | 
|  | 1908 | page_cache_release(page); | 
|  | 1909 | } | 
|  | 1910 | if (name) | 
|  | 1911 | putname(name); | 
|  | 1912 | if (did_down) { | 
|  | 1913 | if (!error) | 
|  | 1914 | inode->i_flags |= S_SWAPFILE; | 
| Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 1915 | mutex_unlock(&inode->i_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1916 | } | 
|  | 1917 | return error; | 
|  | 1918 | } | 
|  | 1919 |  | 
|  | 1920 | void si_swapinfo(struct sysinfo *val) | 
|  | 1921 | { | 
|  | 1922 | unsigned int i; | 
|  | 1923 | unsigned long nr_to_be_unused = 0; | 
|  | 1924 |  | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1925 | spin_lock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1926 | for (i = 0; i < nr_swapfiles; i++) { | 
|  | 1927 | if (!(swap_info[i].flags & SWP_USED) || | 
|  | 1928 | (swap_info[i].flags & SWP_WRITEOK)) | 
|  | 1929 | continue; | 
|  | 1930 | nr_to_be_unused += swap_info[i].inuse_pages; | 
|  | 1931 | } | 
|  | 1932 | val->freeswap = nr_swap_pages + nr_to_be_unused; | 
|  | 1933 | val->totalswap = total_swap_pages + nr_to_be_unused; | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1934 | spin_unlock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1935 | } | 
|  | 1936 |  | 
|  | 1937 | /* | 
|  | 1938 | * Verify that a swap entry is valid and increment its swap map count. | 
|  | 1939 | * | 
|  | 1940 | * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as | 
|  | 1941 | * "permanent", but will be reclaimed by the next swapoff. | 
|  | 1942 | */ | 
|  | 1943 | int swap_duplicate(swp_entry_t entry) | 
|  | 1944 | { | 
|  | 1945 | struct swap_info_struct * p; | 
|  | 1946 | unsigned long offset, type; | 
|  | 1947 | int result = 0; | 
|  | 1948 |  | 
| Christoph Lameter | 0697212 | 2006-06-23 02:03:35 -0700 | [diff] [blame] | 1949 | if (is_migration_entry(entry)) | 
|  | 1950 | return 1; | 
|  | 1951 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1952 | type = swp_type(entry); | 
|  | 1953 | if (type >= nr_swapfiles) | 
|  | 1954 | goto bad_file; | 
|  | 1955 | p = type + swap_info; | 
|  | 1956 | offset = swp_offset(entry); | 
|  | 1957 |  | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1958 | spin_lock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1959 | if (offset < p->max && p->swap_map[offset]) { | 
|  | 1960 | if (p->swap_map[offset] < SWAP_MAP_MAX - 1) { | 
|  | 1961 | p->swap_map[offset]++; | 
|  | 1962 | result = 1; | 
|  | 1963 | } else if (p->swap_map[offset] <= SWAP_MAP_MAX) { | 
|  | 1964 | if (swap_overflow++ < 5) | 
|  | 1965 | printk(KERN_WARNING "swap_dup: swap entry overflow\n"); | 
|  | 1966 | p->swap_map[offset] = SWAP_MAP_MAX; | 
|  | 1967 | result = 1; | 
|  | 1968 | } | 
|  | 1969 | } | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1970 | spin_unlock(&swap_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1971 | out: | 
|  | 1972 | return result; | 
|  | 1973 |  | 
|  | 1974 | bad_file: | 
|  | 1975 | printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val); | 
|  | 1976 | goto out; | 
|  | 1977 | } | 
|  | 1978 |  | 
|  | 1979 | struct swap_info_struct * | 
|  | 1980 | get_swap_info_struct(unsigned type) | 
|  | 1981 | { | 
|  | 1982 | return &swap_info[type]; | 
|  | 1983 | } | 
|  | 1984 |  | 
|  | 1985 | /* | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 1986 | * swap_lock prevents swap_map being freed. Don't grab an extra | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1987 | * reference on the swaphandle, it doesn't matter if it becomes unused. | 
|  | 1988 | */ | 
|  | 1989 | int valid_swaphandles(swp_entry_t entry, unsigned long *offset) | 
|  | 1990 | { | 
| Hugh Dickins | 8952898 | 2008-02-04 22:28:45 -0800 | [diff] [blame] | 1991 | struct swap_info_struct *si; | 
| Hugh Dickins | 3f9e794 | 2006-09-29 02:01:26 -0700 | [diff] [blame] | 1992 | int our_page_cluster = page_cluster; | 
| Hugh Dickins | 8952898 | 2008-02-04 22:28:45 -0800 | [diff] [blame] | 1993 | pgoff_t target, toff; | 
|  | 1994 | pgoff_t base, end; | 
|  | 1995 | int nr_pages = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1996 |  | 
| Hugh Dickins | 3f9e794 | 2006-09-29 02:01:26 -0700 | [diff] [blame] | 1997 | if (!our_page_cluster)	/* no readahead */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1998 | return 0; | 
| Hugh Dickins | 8952898 | 2008-02-04 22:28:45 -0800 | [diff] [blame] | 1999 |  | 
|  | 2000 | si = &swap_info[swp_type(entry)]; | 
|  | 2001 | target = swp_offset(entry); | 
|  | 2002 | base = (target >> our_page_cluster) << our_page_cluster; | 
|  | 2003 | end = base + (1 << our_page_cluster); | 
|  | 2004 | if (!base)		/* first page is swap header */ | 
|  | 2005 | base++; | 
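|  |  | /* | 
|  |  | * Example of the window arithmetic above (hypothetical values): with | 
|  |  | * page_cluster == 3 and a target offset of 37, base = (37 >> 3) << 3 | 
|  |  | * = 32 and end = 32 + 8 = 40, so readahead considers slots 32..39; | 
|  |  | * base is bumped to 1 only when it would otherwise hit the header page. | 
|  |  | */ | 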
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2006 |  | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 2007 | spin_lock(&swap_lock); | 
| Hugh Dickins | 8952898 | 2008-02-04 22:28:45 -0800 | [diff] [blame] | 2008 | if (end > si->max)	/* don't go beyond end of map */ | 
|  | 2009 | end = si->max; | 
|  | 2010 |  | 
|  | 2011 | /* Count contiguous allocated slots above our target */ | 
|  | 2012 | for (toff = target; ++toff < end; nr_pages++) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2013 | /* Don't read in free or bad pages */ | 
| Hugh Dickins | 8952898 | 2008-02-04 22:28:45 -0800 | [diff] [blame] | 2014 | if (!si->swap_map[toff]) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2015 | break; | 
| Hugh Dickins | 8952898 | 2008-02-04 22:28:45 -0800 | [diff] [blame] | 2016 | if (si->swap_map[toff] == SWAP_MAP_BAD) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2017 | break; | 
| Hugh Dickins | 8952898 | 2008-02-04 22:28:45 -0800 | [diff] [blame] | 2018 | } | 
|  | 2019 | /* Count contiguous allocated slots below our target */ | 
|  | 2020 | for (toff = target; --toff >= base; nr_pages++) { | 
|  | 2021 | /* Don't read in free or bad pages */ | 
|  | 2022 | if (!si->swap_map[toff]) | 
|  | 2023 | break; | 
|  | 2024 | if (si->swap_map[toff] == SWAP_MAP_BAD) | 
|  | 2025 | break; | 
|  | 2026 | } | 
| Hugh Dickins | 5d337b9 | 2005-09-03 15:54:41 -0700 | [diff] [blame] | 2027 | spin_unlock(&swap_lock); | 
| Hugh Dickins | 8952898 | 2008-02-04 22:28:45 -0800 | [diff] [blame] | 2028 |  | 
|  | 2029 | /* | 
|  | 2030 | * Indicate starting offset, and return number of pages to get: | 
|  | 2031 | * if only 1, say 0, since there's then no readahead to be done. | 
|  | 2032 | */ | 
|  | 2033 | *offset = ++toff; | 
|  | 2034 | return nr_pages? ++nr_pages: 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2035 | } |