/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/cpuset.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>

#include <asm/tlbflush.h>

/* Register this memory range as a busy "System RAM" iomem resource. */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		printk(KERN_ERR "System RAM resource %llx - %llx cannot be added\n",
		       (unsigned long long)res->start,
		       (unsigned long long)res->end);
		kfree(res);
		res = NULL;
	}
	return res;
}
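
/*
 * Illustrative note: once registered, the hot-added range shows up in
 * /proc/iomem.  For example (hypothetical, platform-dependent
 * addresses), a 128MB add at 0x40000000 would appear roughly as:
 *
 *	40000000-47ffffff : System RAM
 */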

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	if (!zone->wait_table) {
		int ret;

		ret = init_currently_empty_zone(zone, phys_start_pfn,
						nr_pages, MEMMAP_HOTPLUG);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);
	if (ret < 0)
		return ret;

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}
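
/*
 * Note: the hot-add granularity above is one sparsemem section.  The
 * section size is an architecture choice; for example (illustrative
 * values only), a 128MB section with 4KB pages gives
 * PAGES_PER_SECTION == 32768.
 */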

/*
 * Reasonably generic function for adding memory.  It is expected that
 * architectures that support memory hotplug will call this function
 * after deciding the zone to which to add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* align the hot-added range to section boundaries when
	   initializing the mem_map */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource
		 * collision check; see add_memory() =>
		 * register_memory_resource().  A warning is printed if
		 * there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
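
/*
 * Illustrative sketch (hypothetical, not part of this file): an
 * architecture's arch_add_memory() typically picks a zone and hands
 * the pfn range to __add_pages(), roughly like:
 *
 *	int arch_add_memory(int nid, u64 start, u64 size)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		return __add_pages(zone, start_pfn, nr_pages);
 *	}
 *
 * The zone choice (ZONE_NORMAL here) is architecture policy, not
 * mandated by this interface.
 */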

static void grow_zone_span(struct zone *zone,
			   unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}
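
/*
 * Worked example (illustrative pfns): growing a zone that spans
 * [0x8000, 0x10000) by the range [0x10000, 0x10800) leaves
 * zone_start_pfn at 0x8000 and sets spanned_pages to
 * max(0x10000, 0x10800) - 0x8000 = 0x8800.
 */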

static void grow_pgdat_span(struct pglist_data *pgdat,
			    unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			      void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;

	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			online_page(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}

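/*
 * Bring [pfn, pfn + nr_pages) online: notify listeners, grow the
 * zone/pgdat spans, hand the pages back to the allocator, and rebuild
 * the zonelists if the zone was previously unpopulated.
 */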
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;

	nid = page_to_nid(pfn_to_page(pfn));
	if (node_present_pages(nid) == 0)
		arg.status_change_nid = nid;

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated, it is not in the zonelists and
	 * the page allocator ignores it.  So the zonelists must be
	 * rebuilt after onlining.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	walk_memory_resource(pfn, nr_pages, &onlined_pages,
			     online_pages_range);
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();
	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
	}

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones; there are no present pages yet */
	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
}

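/*
 * add_memory() - hot-add the physical range [start, start + size) to
 * node @nid: register it as an iomem resource, allocate a pgdat if the
 * node was offline, then let the architecture map it via
 * arch_add_memory().  Callers are typically the ACPI memory-hotplug
 * driver or the sysfs "probe" interface.
 */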
int add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (!res)
		return -EEXIST;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		if (!pgdat) {
			/* release the just-registered resource on failure */
			ret = -ENOMEM;
			goto error;
		}
		new_pgdat = 1;
	}

	/* call the arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);
	if (ret < 0)
		goto error;

	/* we online the node here; we can't roll back from this point */
	node_set_online(nid);

	cpuset_track_online_nodes();

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file for the new node can't be created,
		 * CPUs on the node can't be hot-added.  There is no way
		 * to roll back now, so catch the failure with BUG_ON(),
		 * reluctantly.
		 */
		BUG_ON(ret);
	}

	return ret;
error:
	/* roll back the pgdat allocation and the rest */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * Confirm that all pages in the range [start_pfn, end_pfn) belong to
 * the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;

	for (pfn = start_pfn; pfn < end_pfn; pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check. */
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU list.
 * Scan pfns from start to end and return the pfn of the first LRU page
 * found, or 0 if the range contains none.
 */
int scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;

	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}

static struct page *
hotremove_migrate_alloc(struct page *page,
			unsigned long private,
			int **x)
{
	/* TODO: this allocation policy should be improved */
	return alloc_page(GFP_HIGHUSER_PAGECACHE);
}

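/*
 * Migrate, or at least isolate, the in-use LRU pages in
 * [start_pfn, end_pfn).  At most NR_OFFLINE_AT_ONCE_PAGES pages are
 * taken per call; the caller is expected to loop until the range is
 * empty.
 */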
#define NR_OFFLINE_AT_ONCE_PAGES	(256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!page_count(page))
			continue;
		/*
		 * We can skip free pages.  And we can only deal with
		 * pages on the LRU.
		 */
		ret = isolate_lru_page(page, &source);
		if (!ret) { /* Success */
			move_pages--;
		} else {
			/*
			 * Because we don't hold the big zone->lock, the
			 * page count must be checked again here.
			 */
			if (page_count(page))
				not_managed++;
#ifdef CONFIG_DEBUG_VM
			printk(KERN_INFO "removing from LRU failed"
			       " %lx/%d/%lx\n",
			       pfn, page_count(page), page->flags);
#endif
		}
	}
	ret = -EBUSY;
	if (not_managed) {
		if (!list_empty(&source))
			putback_lru_pages(&source);
		goto out;
	}
	ret = 0;
	if (list_empty(&source))
		goto out;
	/* this function returns the number of pages that failed to migrate */
	ret = migrate_pages(&source, hotremove_migrate_alloc, 0);

out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			  void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL,
			     offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource,
 * are isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = nr_pages;

	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined,
				   check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

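/*
 * Take [start_pfn, end_pfn) out of service.  The range must be
 * pageblock-aligned and must lie within a single zone.  Pages are
 * isolated first; in-use pages are then migrated away, and the attempt
 * is abandoned once @timeout jiffies have passed.
 */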
int offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/*
	 * Requiring the range to lie in a single zone makes hotplug
	 * much easier (and more readable); we assume this for now.
	 */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set the above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn);
	if (ret)
		return ret;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;
	if (nr_pages >= node_present_pages(node))
		arg.status_change_nid = node;

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		flush_scheduled_work();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have pages on the LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zones' LRU pagevecs; this is asynchronous... */
	lru_add_drain_all();
	flush_scheduled_work();
	yield();
	/* drain the per-cpu (pcp) pages; this is synchronous */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/*
	 * OK, all of our target range is isolated.
	 * We cannot do a rollback at this point.
	 */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pageblock flags so the migrate type becomes MOVABLE again */
	undo_isolate_page_range(start_pfn, end_pfn);
	/* removal success */
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;
	num_physpages -= offlined_pages;

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
	       start_pfn, end_pfn);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* push the pages back to the free area */
	undo_isolate_page_range(start_pfn, end_pfn);

	return ret;
}
#else
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */