#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>

struct vm_area_struct;

/*
 * GFP bitmasks.
 *
 * Zone modifiers (see linux/mmzone.h - low three bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)0x01u)
#define __GFP_HIGHMEM	((__force gfp_t)0x02u)
#define __GFP_DMA32	((__force gfp_t)0x04u)

/*
 * Action modifiers - these do not change the zoning.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed.
 */
#define __GFP_WAIT	((__force gfp_t)0x10u)	/* Can wait and reschedule? */
#define __GFP_HIGH	((__force gfp_t)0x20u)	/* Should access emergency pools? */
#define __GFP_IO	((__force gfp_t)0x40u)	/* Can start physical IO? */
#define __GFP_FS	((__force gfp_t)0x80u)	/* Can call down to low-level FS? */
#define __GFP_COLD	((__force gfp_t)0x100u)	/* Cache-cold page required */
#define __GFP_NOWARN	((__force gfp_t)0x200u)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((__force gfp_t)0x400u)	/* See above */
#define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* See above */
#define __GFP_NORETRY	((__force gfp_t)0x1000u)	/* See above */
#define __GFP_COMP	((__force gfp_t)0x4000u)	/* Add compound page metadata */
#define __GFP_ZERO	((__force gfp_t)0x8000u)	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
#define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE	((__force gfp_t)0x40000u) /* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
#define __GFP_MOVABLE	((__force gfp_t)0x100000u) /* Page is movable */
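
/*
 * Illustrative sketch (editor's example, not part of this header): callers
 * build a gfp mask by OR-ing action modifiers into a base mask.  A caller
 * that can fall back to a smaller request might suppress the failure
 * warning and forbid retries on its first, larger attempt:
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, 2);
 *	if (!page)
 *		page = alloc_pages(GFP_KERNEL, 0);
 */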

#define __GFP_BITS_SHIFT 21	/* Room for 21 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_HIGHMEM | \
				 __GFP_MOVABLE)
#define GFP_NOFS_PAGECACHE	(__GFP_WAIT | __GFP_IO | __GFP_MOVABLE)
#define GFP_USER_PAGECACHE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_MOVABLE)
#define GFP_HIGHUSER_PAGECACHE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_HIGHMEM | \
				 __GFP_MOVABLE)
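
/*
 * Illustrative sketch (editor's example, not part of this header): the
 * composite masks above encode what the allocation context may do.  An
 * interrupt handler cannot sleep, so it must use GFP_ATOMIC; ordinary
 * process context that may block uses GFP_KERNEL:
 *
 *	buf = kmalloc(len, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 */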

#ifdef CONFIG_NUMA
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE	((__force gfp_t)0)
#endif

/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)

/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_NOMEMALLOC)

/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32	__GFP_DMA32

/* Convert GFP flags to their corresponding migrate type */
static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
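
/*
 * Worked example (editor's note, assuming the MIGRATE_* enum in
 * linux/mmzone.h orders UNMOVABLE, RECLAIMABLE, MOVABLE): a mask with
 * __GFP_MOVABLE set and __GFP_RECLAIMABLE clear yields (1 << 1) | 0 = 2,
 * the movable migrate type; __GFP_RECLAIMABLE alone yields 1; neither
 * flag yields 0, the unmovable type.  The WARN_ON above fires when both
 * flags are set, since a page cannot be grouped as both movable and
 * reclaimable.
 */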

static inline enum zone_type gfp_zone(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	if (flags & __GFP_DMA)
		return ZONE_DMA;
#endif
#ifdef CONFIG_ZONE_DMA32
	if (flags & __GFP_DMA32)
		return ZONE_DMA32;
#endif
	if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
			(__GFP_HIGHMEM | __GFP_MOVABLE))
		return ZONE_MOVABLE;
#ifdef CONFIG_HIGHMEM
	if (flags & __GFP_HIGHMEM)
		return ZONE_HIGHMEM;
#endif
	return ZONE_NORMAL;
}
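
/*
 * Illustrative mapping (editor's note, assuming the respective zones are
 * configured in): the checks above fall through in priority order, so
 * GFP_KERNEL (no zone bits) lands in ZONE_NORMAL, GFP_HIGHUSER in
 * ZONE_HIGHMEM, GFP_HIGHUSER_MOVABLE in ZONE_MOVABLE, and GFP_DMA in
 * ZONE_DMA.  Note that __GFP_MOVABLE alone does not select ZONE_MOVABLE;
 * it must be combined with __GFP_HIGHMEM.
 */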

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such can
 * allocate highmem pages; the *get*page*() variants return virtual kernel
 * addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
	if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
		return 1;

	return 0;
}
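
/*
 * Editor's note: index 0 selects the node's general zonelist (all zones
 * with memory, including fallback to other nodes); index 1 selects the
 * __GFP_THISNODE zonelist, which contains only the local node's zones,
 * as the comment before node_zonelist() below describes.
 */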

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}
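
/*
 * Illustrative sketch (editor's example, not part of this header): a
 * caller that wants the local node's zonelist for a GFP_KERNEL
 * allocation would write:
 *
 *	struct zonelist *zl = node_zonelist(numa_node_id(), GFP_KERNEL);
 */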

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
}

static inline struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, nodemask_t *nodemask)
{
	return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
}
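
/*
 * Editor's note: both wrappers funnel into __alloc_pages_internal(), the
 * single page-allocator entry point the comment above refers to;
 * __alloc_pages() simply passes a NULL nodemask, so no node filtering is
 * applied beyond the zonelist itself.
 */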

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	/* Unknown node is current node */
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
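
/*
 * Illustrative sketch (editor's example, not part of this header):
 * allocating four contiguous pages (order 2), falling back to the
 * current node by passing a negative nid:
 *
 *	struct page *page = alloc_pages_node(-1, GFP_KERNEL, 2);
 *	if (page)
 *		__free_pages(page, 2);
 */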

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
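
/*
 * Illustrative sketch (editor's example, not part of this header): the
 * struct-page namespace pairs alloc_page()/alloc_pages() with
 * __free_page()/__free_pages(), declared below.  A zeroed single page:
 *
 *	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 *	if (page)
 *		__free_page(page);
 */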

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);

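/*
 * Illustrative sketch (editor's example, not part of this header):
 * alloc_pages_exact() rounds the request up to whole pages but, unlike
 * __get_free_pages(), releases the unused tail of the rounded-up
 * power-of-two block.  Allocating a buffer that is not a power-of-two
 * number of pages:
 *
 *	void *buf = alloc_pages_exact(3 * PAGE_SIZE, GFP_KERNEL);
 *	if (buf)
 *		free_pages_exact(buf, 3 * PAGE_SIZE);
 */
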
#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_page(struct page *page);
extern void free_cold_page(struct page *page);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);

#endif /* __LINUX_GFP_H */