#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>
#include <linux/mmdebug.h>

struct vm_area_struct;

/* Plain integer GFP bitmasks. Do not use these directly. */
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_WAIT		0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_COLD		0x100u
#define ___GFP_NOWARN		0x200u
#define ___GFP_REPEAT		0x400u
#define ___GFP_NOFAIL		0x800u
#define ___GFP_NORETRY		0x1000u
#define ___GFP_MEMALLOC		0x2000u
#define ___GFP_COMP		0x4000u
#define ___GFP_ZERO		0x8000u
#define ___GFP_NOMEMALLOC	0x10000u
#define ___GFP_HARDWALL		0x20000u
#define ___GFP_THISNODE		0x40000u
#define ___GFP_RECLAIMABLE	0x80000u
#define ___GFP_NOTRACK		0x200000u
#define ___GFP_OTHER_NODE	0x800000u
#define ___GFP_WRITE		0x1000000u

/*
 * GFP bitmasks.
 *
 * Zone modifiers (see linux/mmzone.h - low three bits)
 *
 * Do not make these definitions conditional. If necessary, modify the
 * definitions without the underscores and use those consistently. The
 * definitions here may be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* Page is movable */
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
/*
 * Action modifiers - these do not change the zoning.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.  This modifier is deprecated and no new
 * users should be added.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed.  See the usage sketch after the flag definitions
 * below.
 */
#define __GFP_WAIT	((__force gfp_t)___GFP_WAIT)	/* Can wait and reschedule? */
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)	/* Should access emergency pools? */
#define __GFP_IO	((__force gfp_t)___GFP_IO)	/* Can start physical IO? */
#define __GFP_FS	((__force gfp_t)___GFP_FS)	/* Can call down to low-level FS? */
#define __GFP_COLD	((__force gfp_t)___GFP_COLD)	/* Cache-cold page required */
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)	/* See above */
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)	/* See above */
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY) /* See above */
#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)	/* Add compound page metadata */
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
							 * This takes precedence over the
							 * __GFP_MEMALLOC flag if both are
							 * set
							 */
#define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */

#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */
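
/*
 * Usage sketch (illustrative): the action modifiers above are OR-ed into
 * a base GFP constant.  Here a caller opts out of both the retry loop
 * and the failure warning for an order-2 allocation it can recover from:
 *
 *	struct page *page;
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, 2);
 *	if (!page)
 *		return -ENOMEM;
 */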

/*
 * This may seem redundant, but it's a way of annotating false positives vs.
 * allocations that simply cannot be supported (e.g. page tables).
 */
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_HIGHMEM | \
				 __GFP_MOVABLE)
#define GFP_IOFS	(__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)
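
/*
 * Usage sketch (illustrative; 'len', 'skb_data' and 'table' are
 * hypothetical, and kmalloc() comes from <linux/slab.h>): pick the
 * composite that matches the calling context.  GFP_KERNEL may sleep and
 * start filesystem I/O; GFP_NOFS and GFP_NOIO are for paths that could
 * recurse into the FS or block layer; GFP_ATOMIC never sleeps and may
 * dip into the emergency pools.
 *
 *	In an interrupt handler (must not sleep):
 *		skb_data = kmalloc(len, GFP_ATOMIC);
 *	In process context (may sleep and do I/O):
 *		table = kmalloc(len, GFP_KERNEL);
 */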

#ifdef CONFIG_NUMA
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE	((__force gfp_t)0)
#endif

/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)

/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)

/* Control slab gfp mask during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))

/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32	__GFP_DMA32

/* Convert GFP flags to their corresponding migrate type */
static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
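
/*
 * For example, assuming the migrate types keep their declaration order
 * in linux/mmzone.h:
 *
 *	allocflags_to_migratetype(GFP_KERNEL)           == MIGRATE_UNMOVABLE
 *	allocflags_to_migratetype(GFP_TEMPORARY)        == MIGRATE_RECLAIMABLE
 *	allocflags_to_migratetype(GFP_HIGHUSER_MOVABLE) == MIGRATE_MOVABLE
 */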

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word-size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * At most one of the lowest 3 bits (DMA, DMA32, HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 or NORMAL (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if 16 * ZONES_SHIFT > BITS_PER_LONG
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * ZONES_SHIFT)				      \
	| (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT)			      \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT)		      \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT)		      \
	| (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT)			      \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT)	      \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT)   \
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT)   \
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. The bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				      \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)	      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
					 ((1 << ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
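
/*
 * Worked example: for gfp_zone(__GFP_HIGHMEM), bit is 0x2, so z is
 * (GFP_ZONE_TABLE >> (2 * ZONES_SHIFT)) masked down to ZONES_SHIFT bits,
 * i.e. the OPT_ZONE_HIGHMEM entry: ZONE_HIGHMEM on CONFIG_HIGHMEM
 * kernels and ZONE_NORMAL otherwise.
 */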

/*
 * There is only one page allocator, but two main namespaces for it:
 * the alloc_page*() variants return 'struct page *' and as such can
 * allocate highmem pages; the *get*page*() variants return kernel
 * virtual addresses for the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
	if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
		return 1;

	return 0;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	/* Unknown node is current node */
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
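
/*
 * Usage sketch (illustrative; 'nid' is a caller-supplied node id):
 *
 *	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 0);
 *	if (page)
 *		__free_pages(page, 0);
 *
 * alloc_pages_node() tolerates nid < 0 and falls back to the current
 * node; alloc_pages_exact_node() requires a valid online node.
 */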

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node)	\
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
#define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
	alloc_pages_vma(gfp_mask, 0, vma, addr, node)
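
/*
 * Usage sketch (illustrative; 'vma' and 'address' are assumed to come
 * from a fault handler).  On NUMA kernels the vma's memory policy
 * decides where the page is placed:
 *
 *	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 */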

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
/* Note: despite the similar name, this is not alloc_pages_exact_node():
 * it takes a size in bytes and returns a kernel virtual address. */
void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
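
/*
 * Usage sketch (illustrative): alloc_pages_exact() suits buffers whose
 * size is not a power-of-two number of pages; the unused tail pages of
 * the underlying allocation are freed back to the page allocator:
 *
 *	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
 *	...
 *	free_pages_exact(buf, 5 * PAGE_SIZE);
 */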

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, int cold);
extern void free_hot_cold_page_list(struct list_head *list, int cold);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
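
/*
 * Usage sketch (illustrative): the address-based variants pair up, and
 * the order passed to free_pages() must match the allocation:
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_pages(addr, 1);
 */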

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_CMA

/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
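
/*
 * Usage sketch (illustrative; 'pfn' and 'nr' are hypothetical values).
 * The range must lie within a single zone and, for MIGRATE_CMA, inside
 * an area reserved at boot:
 *
 *	if (!alloc_contig_range(pfn, pfn + nr, MIGRATE_CMA)) {
 *		...
 *		free_contig_range(pfn, nr);
 *	}
 */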

/* CMA pageblock initialization */
extern void init_cma_reserved_pageblock(struct page *page);

#endif

#endif /* __LINUX_GFP_H */