#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>
#include <linux/mmdebug.h>

struct vm_area_struct;

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_WAIT		0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_COLD		0x100u
#define ___GFP_NOWARN		0x200u
#define ___GFP_REPEAT		0x400u
#define ___GFP_NOFAIL		0x800u
#define ___GFP_NORETRY		0x1000u
#define ___GFP_CMA		0x2000u
#define ___GFP_COMP		0x4000u
#define ___GFP_ZERO		0x8000u
#define ___GFP_NOMEMALLOC	0x10000u
#define ___GFP_HARDWALL		0x20000u
#define ___GFP_THISNODE		0x40000u
#define ___GFP_RECLAIMABLE	0x80000u
#ifdef CONFIG_KMEMCHECK
#define ___GFP_NOTRACK		0x200000u
#else
#define ___GFP_NOTRACK		0
#endif
#define ___GFP_NO_KSWAPD	0x400000u
#define ___GFP_OTHER_NODE	0x800000u
#define ___GFP_WRITE		0x1000000u

/*
 * GFP bitmasks..
 *
 * Zone modifiers (see linux/mmzone.h - low three bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* Page is movable */
#define __GFP_CMA	((__force gfp_t)___GFP_CMA)
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE| \
			 __GFP_CMA)
/*
 * Action modifiers - don't change the zoning
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.  This modifier is deprecated and no new
 * users should be added.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed
 */
#define __GFP_WAIT	((__force gfp_t)___GFP_WAIT)	/* Can wait and reschedule? */
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)	/* Should access emergency pools? */
#define __GFP_IO	((__force gfp_t)___GFP_IO)	/* Can start physical IO? */
#define __GFP_FS	((__force gfp_t)___GFP_FS)	/* Can call down to low-level FS? */
#define __GFP_COLD	((__force gfp_t)___GFP_COLD)	/* Cache-cold page required */
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)	/* See above */
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)	/* See above */
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY) /* See above */
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)	/* Add compound page metadata */
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */
#define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */

#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */

/*
 * This may seem redundant, but it's a way of annotating false positives vs.
 * allocations that simply cannot be supported (e.g. page tables).
 */
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

#define __GFP_BITS_SHIFT 25	/* Room for 25 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_HIGHMEM | \
				 __GFP_MOVABLE)
#define GFP_IOFS	(__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
			 __GFP_NO_KSWAPD)
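
/*
 * Example: a rough sketch of how the composite masks above are typically
 * chosen.  GFP_KERNEL is the usual mask in process context where the caller
 * may sleep and start I/O; GFP_ATOMIC is for interrupt or spinlock context
 * where sleeping is forbidden and the emergency pools may be tapped:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
 *
 * GFP_NOIO and GFP_NOFS drop __GFP_IO and __GFP_FS respectively, for callers
 * that could otherwise recurse into the block layer or a filesystem while
 * reclaim is in progress.
 */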

#ifdef CONFIG_NUMA
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE	((__force gfp_t)0)
#endif

/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE|__GFP_CMA)

/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_NOMEMALLOC)

/* Control slab gfp mask during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))

/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32	__GFP_DMA32

/* Convert GFP flags to their corresponding migrate type */
static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
#ifndef CONFIG_CMA
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
#else
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		(((gfp_flags & __GFP_CMA) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
#endif
}
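
/*
 * Worked example (sketch, for the non-CMA mapping above): with
 * MIGRATE_UNMOVABLE == 0, MIGRATE_RECLAIMABLE == 1 and MIGRATE_MOVABLE == 2
 * (see linux/mmzone.h),
 *
 *	allocflags_to_migratetype(GFP_KERNEL)			returns 0
 *	allocflags_to_migratetype(GFP_KERNEL | __GFP_RECLAIMABLE) returns 1
 *	allocflags_to_migratetype(GFP_HIGHUSER_MOVABLE)		returns 2
 *
 * i.e. __GFP_MOVABLE selects bit 1 and __GFP_RECLAIMABLE bit 0 of the
 * migrate type; the WARN_ON() above catches callers that set the whole
 * GFP_MOVABLE_MASK at once.
 */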

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or DMA or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if 16 * ZONES_SHIFT > BITS_PER_LONG
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * ZONES_SHIFT)				      \
	| (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT)			      \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT)		      \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT)		      \
	| (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT)			      \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT)	      \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT)   \
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT)   \
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				      \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
					 ((1 << ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
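
/*
 * Worked example (sketch): for GFP_HIGHUSER_MOVABLE the zone bits are
 * __GFP_HIGHMEM | __GFP_MOVABLE, i.e. bit == 0xa, so gfp_zone() returns
 *
 *	(GFP_ZONE_TABLE >> (0xa * ZONES_SHIFT)) & ((1 << ZONES_SHIFT) - 1)
 *		== ZONE_MOVABLE
 *
 * which matches the 0xa row of the table in the comment above.  A plain
 * GFP_KERNEL request sets no zone bits (bit == 0x0) and yields ZONE_NORMAL.
 */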

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */
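
/*
 * Example (sketch of the two namespaces described above): alloc_page()
 * hands back a struct page that may live in highmem and must be mapped
 * (e.g. with kmap()) before use, while __get_free_page() returns a kernel
 * virtual address that is directly usable:
 *
 *	struct page *page = alloc_page(GFP_HIGHUSER);
 *	unsigned long addr = __get_free_page(GFP_KERNEL);
 *	...
 *	__free_page(page);
 *	free_page(addr);
 */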

static inline int gfp_zonelist(gfp_t flags)
{
	if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
		return 1;

	return 0;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	/* Unknown node is current node */
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
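
/*
 * Usage sketch: alloc_pages_node() tolerates a negative nid ("no preference")
 * and falls back to the current node, whereas alloc_pages_exact_node()
 * insists on a valid, online node.  A per-node buffer might be set up as:
 *
 *	struct page *page = alloc_pages_node(node, GFP_KERNEL, order);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, order);
 */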

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node)	\
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
#define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
	alloc_pages_vma(gfp_mask, 0, vma, addr, node)

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
/* This is different from alloc_pages_exact_node !!! */
void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, int cold);
extern void free_hot_cold_page_list(struct list_head *list, int cold);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_CMA

/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);

/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);

#endif

#endif /* __LINUX_GFP_H */