/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>
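
/*
 * swapper_pg_dir is the kernel's reference page table: paging_init()
 * points the MMU's TTB register at it, and fixmap entries are installed
 * into it via __set_fixmap(). mmu_gathers is the per-CPU state used by
 * the generic TLB shootdown code.
 */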
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * Handle trivial transitions between cached and uncached
 * segments, making use of the 1:1 mapping relationship in
 * 512MB lowmem.
 *
 * This is the offset of the uncached section from its cached alias.
 * The default value is only valid in 29-bit mode; in 32-bit mode it is
 * overridden in pmb_init().
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
#endif

#ifdef CONFIG_MMU
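/*
 * Install a single kernel pte mapping the page at 'phys' to the fixed
 * virtual address 'addr', walking (and allocating where necessary) the
 * intermediate page table levels, then flushing the stale TLB entry
 * for that address.
 */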
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch. We don't presently do this, but it could be done
 * in a similar fashion to the wired TLB interface that sh64 uses (by way
 * of the memory-mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (ie, we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

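/*
 * Pre-allocate pte tables (and fill in the corresponding pmd entries)
 * for the kernel virtual range [start, end), so that mappings such as
 * the fixmap entries can later be installed without having to allocate
 * page table pages.
 */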
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx;
	unsigned long vaddr;

	vaddr = start & PMD_MASK;
	end = (end + PMD_SIZE - 1) & PMD_MASK;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		BUG_ON(pgd_none(*pgd));
		pud = pud_offset(pgd, 0);
		BUG_ON(pud_none(*pud));
		pmd = pmd_offset(pud, 0);

		if (!pmd_present(*pmd)) {
			pte_t *pte_table;
			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte_table);
		}

		vaddr += PMD_SIZE;
	}
}
#endif	/* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr;
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

#ifdef CONFIG_SUPERH32
	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
#endif
}

static struct kcore_list kcore_mem, kcore_vmalloc;

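/*
 * Called once at boot to hand the remaining bootmem pages on each node
 * over to the page allocator, register the /proc/kcore segments, zero
 * and write back the shared zero page, and print the kernel memory
 * usage summary. The vDSO is also initialized here via vsyscall_init().
 */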
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}

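/*
 * Release the memory occupied by the kernel's .init sections back to
 * the page allocator once boot has completed; those pages are no
 * longer referenced after this point.
 */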
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
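/*
 * Free the pages that held the initial ramdisk image once it is no
 * longer needed, returning them to the page allocator in the same way
 * as the .init sections above.
 */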
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#if THREAD_SHIFT < PAGE_SHIFT
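/*
 * When the thread stack is smaller than a page, carving thread_info
 * structures out of whole pages would waste memory, so they are
 * allocated from a dedicated slab cache instead.
 */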
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */

#ifdef CONFIG_MEMORY_HOTPLUG
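/*
 * Memory hotplug hook: register 'size' bytes of new memory starting at
 * physical address 'start' with node 'nid'.
 */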
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
			  start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */