#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>

#include <linux/atomic.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* discard swap cluster after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD)

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
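
/*
 * Worked example: with CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE both
 * enabled, MAX_SWAPFILES = (1 << 5) - 2 - 1 = 29, and the three reserved
 * type values (29, 30, 31) encode the hwpoison entry and the migration
 * read/write entries defined above.
 */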

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
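
/*
 * Illustrative sketch: how the trailing magic is checked when a swap
 * area is activated.  The real test lives in read_swap_header() in
 * mm/swapfile.c; example_swap_magic_ok() is a hypothetical helper,
 * assuming the header page has already been read in and that memcmp()
 * (<linux/string.h>) is available.
 */
static inline int example_swap_magic_ok(union swap_header *hdr)
{
	/* old format "SWAP-SPACE" or current "SWAPSPACE2", both 10 bytes */
	return !memcmp(hdr->magic.magic, "SWAP-SPACE", 10) ||
	       !memcmp(hdr->magic.magic, "SWAPSPACE2", 10);
}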

 /* A swap entry has to fit into an "unsigned long", as the entry
  * is hidden in the "index" field of the swapper address space.
  */
typedef struct {
	unsigned long val;
} swp_entry_t;
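
/*
 * Illustrative sketch: the arch-independent encoding keeps the swap type
 * in the top MAX_SWAPFILES_SHIFT bits of swp_entry_t and the offset in
 * the rest.  The real helpers are swp_entry(), swp_type() and swp_offset()
 * in <linux/swapops.h>; the example_* names below are hypothetical
 * stand-ins, assuming only that split.
 */
static inline swp_entry_t example_swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t e;

	/* type in the high bits, offset in the remaining low bits */
	e.val = (type << (BITS_PER_LONG - MAX_SWAPFILES_SHIFT)) | offset;
	return e;
}

static inline unsigned int example_swp_type(swp_entry_t e)
{
	return e.val >> (BITS_PER_LONG - MAX_SWAPFILES_SHIFT);
}

static inline pgoff_t example_swp_offset(swp_entry_t e)
{
	return e.val & ((1UL << (BITS_PER_LONG - MAX_SWAPFILES_SHIFT)) - 1);
}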

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range
 * of disk blocks.  A list of swap extents maps the entire swapfile.  (Where
 * the term `swapfile' refers to either a blockdevice or an IS_REG file;
 * apart from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
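
/*
 * Illustrative sketch: how a swapfile page offset resolves to a disk
 * block through the extent list above.  The real lookup behind
 * map_swap_page() in mm/swapfile.c also caches the current extent as a
 * scan hint; example_extent_to_block() is a hypothetical, linear-walk
 * version.
 */
static inline sector_t example_extent_to_block(struct list_head *extents,
					       pgoff_t offset)
{
	struct swap_extent *se;

	list_for_each_entry(se, extents, list) {
		if (offset >= se->start_page &&
		    offset < se->start_page + se->nr_pages)
			return se->start_block + (offset - se->start_page);
	}
	return 0;	/* offset not covered by any extent */
}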

/*
 * Max bad pages in the new format.
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
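
/*
 * Worked example, assuming PAGE_SIZE == 4096: info.badpages starts at
 * byte offset 1536 (1024 bootbits + 3 * 4 counters + 2 * 16 uuid/volume
 * + 117 * 4 padding) and magic.magic starts at 4086 (PAGE_SIZE - 10),
 * so MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637 entries.
 */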

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* swapon+blkdev support discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
					/* add others here before... */
	SWP_SCANNING	= (1 << 8),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/*
 * Ratio between the present memory in the zone and the "gap" that we're
 * allowing kswapd to shrink in addition to the per-zone high wmark, even
 * for zones that already have the high wmark satisfied, in order to
 * provide better per-zone lru behavior.  It is OK to spend no more than
 * 1% of the zone's memory on this balancing "gap".
 */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
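
/*
 * Illustrative sketch of how one swap_map byte decodes under the scheme
 * above.  The authoritative logic is swap_count() and the continuation
 * handling in mm/swapfile.c; example_swap_count() is a hypothetical
 * helper that ignores any continuation bytes beyond the first.
 */
static inline int example_swap_count(unsigned char ent)
{
	if (ent == SWAP_MAP_BAD)	/* unusable (bad) slot */
		return -1;
	if (ent == SWAP_MAP_SHMEM)	/* slot owned by shmem/tmpfs */
		return 1;
	/* low bits: duplication count; 0x40 and 0x80 are flag bits */
	return ent & ~(SWAP_HAS_CACHE | COUNT_CONTINUED);
}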

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	signed char	type;		/* strange name for an index */
	signed char	next;		/* next type on the swap list */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int lowest_alloc;	/* while preparing discard cluster */
	unsigned int highest_alloc;	/* while preparing discard cluster */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
	spinlock_t lock;		/*
					 * Protects the map-scan related
					 * fields: swap_map, lowest_bit,
					 * highest_bit, inuse_pages,
					 * cluster_next, cluster_nr,
					 * lowest_alloc and highest_alloc.
					 * Other fields are only changed at
					 * swapon/swapoff, so are protected
					 * by swap_lock.  Changing flags
					 * requires holding both this lock
					 * and swap_lock; when both are
					 * needed, take swap_lock first.
					 */
};

struct swap_list_t {
	int head;	/* head of priority-ordered swapfile list */
	int next;	/* swapfile to be used next */
};
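
/*
 * Illustrative sketch: the head/next indices above (and the per-area
 * "next" field) chain swap areas in priority order, terminated by -1.
 * A minimal example, assuming a swap_info[] pointer array like the one
 * in mm/swapfile.c and that the caller holds the needed locking;
 * example_first_writable() is a hypothetical helper.
 */
static inline int example_first_writable(struct swap_list_t *list,
					 struct swap_info_struct **swap_info)
{
	int type;

	for (type = list->head; type >= 0; type = swap_info[type]->next)
		if (swap_info[type]->flags & SWP_WRITEOK)
			return type;
	return -1;	/* no writable swap area configured */
}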
218
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219/* linux/mm/page_alloc.c */
220extern unsigned long totalram_pages;
Hideo AOKIcb45b0e2006-04-10 22:52:59 -0700221extern unsigned long totalreserve_pages;
Johannes Weinerab8fabd2012-01-10 15:07:42 -0800222extern unsigned long dirty_balance_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223extern unsigned int nr_free_buffer_pages(void);
224extern unsigned int nr_free_pagecache_pages(void);
225
Christoph Lameter96177292007-02-10 01:43:03 -0800226/* Definition of global_page_state not available yet */
227#define nr_free_pages() global_page_state(NR_FREE_PAGES)
228
229
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230/* linux/mm/swap.c */
KOSAKI Motohirof04e9eb2008-10-18 20:26:19 -0700231extern void __lru_cache_add(struct page *, enum lru_list lru);
232extern void lru_cache_add_lru(struct page *, enum lru_list lru);
Andrea Arcangeli71e3aac2011-01-13 15:46:52 -0800233extern void lru_add_page_tail(struct zone* zone,
234 struct page *page, struct page *page_tail);
Harvey Harrisonb3c97522008-02-13 15:03:15 -0800235extern void activate_page(struct page *);
236extern void mark_page_accessed(struct page *);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237extern void lru_add_drain(void);
Konstantin Khlebnikovf0cb3c72012-03-21 16:34:06 -0700238extern void lru_add_drain_cpu(int cpu);
Nick Piggin053837f2006-01-18 17:42:27 -0800239extern int lru_add_drain_all(void);
Miklos Szerediac6aadb2008-04-28 02:12:38 -0700240extern void rotate_reclaimable_page(struct page *page);
Minchan Kim31560182011-03-22 16:32:52 -0700241extern void deactivate_page(struct page *page);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242extern void swap_setup(void);
243
Lee Schermerhorn894bc312008-10-18 20:26:39 -0700244extern void add_page_to_unevictable_list(struct page *page);
245
KOSAKI Motohirof04e9eb2008-10-18 20:26:19 -0700246/**
247 * lru_cache_add: add a page to the page lists
248 * @page: the page to add
249 */
Rik van Riel4f98a2f2008-10-18 20:26:32 -0700250static inline void lru_cache_add_anon(struct page *page)
KOSAKI Motohirof04e9eb2008-10-18 20:26:19 -0700251{
Rik van Riel4f98a2f2008-10-18 20:26:32 -0700252 __lru_cache_add(page, LRU_INACTIVE_ANON);
KOSAKI Motohirof04e9eb2008-10-18 20:26:19 -0700253}
254
Rik van Riel4f98a2f2008-10-18 20:26:32 -0700255static inline void lru_cache_add_file(struct page *page)
256{
257 __lru_cache_add(page, LRU_INACTIVE_FILE);
258}
259
Linus Torvalds1da177e2005-04-16 15:20:36 -0700260/* linux/mm/vmscan.c */
Mel Gormandac1d272008-04-28 02:12:12 -0700261extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
KAMEZAWA Hiroyuki327c0e92009-03-31 15:23:31 -0700262 gfp_t gfp_mask, nodemask_t *mask);
Konstantin Khlebnikovfa168092012-05-29 15:06:54 -0700263extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
Johannes Weiner185efc02011-09-14 16:21:58 -0700264extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
265 gfp_t gfp_mask, bool noswap);
266extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
267 gfp_t gfp_mask, bool noswap,
268 struct zone *zone,
269 unsigned long *nr_scanned);
Andrew Morton69e05942006-03-22 00:08:19 -0800270extern unsigned long shrink_all_memory(unsigned long nr_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271extern int vm_swappiness;
Christoph Lameterb20a3502006-03-22 00:09:12 -0800272extern int remove_mapping(struct address_space *mapping, struct page *page);
Andrew Mortonbd1e22b2006-06-23 02:03:47 -0700273extern long vm_total_pages;
Christoph Lameterb20a3502006-03-22 00:09:12 -0800274
Christoph Lameter9eeff232006-01-18 17:42:31 -0800275#ifdef CONFIG_NUMA
276extern int zone_reclaim_mode;
Christoph Lameter96146342006-07-03 00:24:13 -0700277extern int sysctl_min_unmapped_ratio;
Christoph Lameter0ff38492006-09-25 23:31:52 -0700278extern int sysctl_min_slab_ratio;
Christoph Lameter9eeff232006-01-18 17:42:31 -0800279extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
280#else
281#define zone_reclaim_mode 0
282static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
283{
284 return 0;
285}
286#endif
287
Lee Schermerhorn894bc312008-10-18 20:26:39 -0700288extern int page_evictable(struct page *page, struct vm_area_struct *vma);
Hugh Dickins24513262012-01-20 14:34:21 -0800289extern void check_move_unevictable_pages(struct page **, int nr_pages);
Lee Schermerhornaf936a12008-10-18 20:26:53 -0700290
291extern unsigned long scan_unevictable_pages;
Alexey Dobriyan8d65af72009-09-23 15:57:19 -0700292extern int scan_unevictable_handler(struct ctl_table *, int,
Lee Schermerhornaf936a12008-10-18 20:26:53 -0700293 void __user *, size_t *, loff_t *);
Thadeu Lima de Souza Cascardoe4455ab2010-10-26 14:21:28 -0700294#ifdef CONFIG_NUMA
Lee Schermerhornaf936a12008-10-18 20:26:53 -0700295extern int scan_unevictable_register_node(struct node *node);
296extern void scan_unevictable_unregister_node(struct node *node);
Thadeu Lima de Souza Cascardoe4455ab2010-10-26 14:21:28 -0700297#else
298static inline int scan_unevictable_register_node(struct node *node)
299{
300 return 0;
301}
302static inline void scan_unevictable_unregister_node(struct node *node)
303{
304}
305#endif
Lee Schermerhorn894bc312008-10-18 20:26:39 -0700306
Yasunori Goto3218ae12006-06-27 02:53:33 -0700307extern int kswapd_run(int nid);
David Rientjes8fe23e02009-12-14 17:58:33 -0800308extern void kswapd_stop(int nid);
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -0700309#ifdef CONFIG_CGROUP_MEM_RES_CTLR
310extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
311#else
312static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
313{
314 return vm_swappiness;
315}
316#endif
Michal Hockodac23b02012-04-05 14:25:16 -0700317#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
318extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
319#else
320static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
321{
322}
323#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324#ifdef CONFIG_SWAP
325/* linux/mm/page_io.c */
Minchan Kimaca8bf32009-06-16 15:33:02 -0700326extern int swap_readpage(struct page *);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327extern int swap_writepage(struct page *page, struct writeback_control *wbc);
NeilBrown6712ecf2007-09-27 12:47:43 +0200328extern void end_swap_bio_read(struct bio *bio, int err);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700329
330/* linux/mm/swap_state.c */
331extern struct address_space swapper_space;
332#define total_swapcache_pages swapper_space.nrpages
333extern void show_swap_cache_info(void);
Hugh Dickinsac47b002009-01-06 14:39:39 -0800334extern int add_to_swap(struct page *);
Hugh Dickins73b12622008-02-04 22:28:50 -0800335extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336extern void __delete_from_swap_cache(struct page *);
337extern void delete_from_swap_cache(struct page *);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338extern void free_page_and_swap_cache(struct page *);
339extern void free_pages_and_swap_cache(struct page **, int);
Hugh Dickins46017e92008-02-04 22:28:41 -0800340extern struct page *lookup_swap_cache(swp_entry_t);
Hugh Dickins02098fe2008-02-04 22:28:42 -0800341extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
Hugh Dickins46017e92008-02-04 22:28:41 -0800342 struct vm_area_struct *vma, unsigned long addr);
Hugh Dickins02098fe2008-02-04 22:28:42 -0800343extern struct page *swapin_readahead(swp_entry_t, gfp_t,
Hugh Dickins46017e92008-02-04 22:28:41 -0800344 struct vm_area_struct *vma, unsigned long addr);
345
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346/* linux/mm/swapfile.c */
Shaohua Lid1c2fbe2013-02-22 16:34:38 -0800347extern atomic_long_t nr_swap_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348extern long total_swap_pages;

/* Swap 50% full? Release swapcache more aggressively... */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
extern int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep);
#else
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}
#endif

#else /* CONFIG_SWAP */

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave page_cache_release and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), 0);

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp, struct page *page)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
					    struct vm_area_struct *vma,
					    unsigned long addr)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
				    gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

#define reuse_swap_page(page)	(page_mapcount(page) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
{
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
static inline int
mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
{
	return 0;
}
#endif

#endif /* CONFIG_SWAP */
#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */