/*
 * linux/arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>

#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

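/*
 * Per-CPU state used by the TLB gather code (see asm/tlb.h) to batch
 * up page freeing and TLB invalidation while tearing down mappings.
 */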
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void _stext, _etext, __data_start, _end;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

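/*
 * Clear out the initial boot mappings we no longer want: everything
 * below PAGE_OFFSET (except an XIP kernel image), and everything from
 * the end of the first memory bank up to VMALLOC_END.
 */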
static inline void prepare_page_table(struct meminfo *mi)
{
	unsigned long addr;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the various regions of node 0
 */
void __init reserve_node_zero(pg_data_t *pgdat)
{
	unsigned long res_size = 0;

	/*
	 * Register the kernel text and data with bootmem.
	 * Note that this can only be in node 0.
	 */
#ifdef CONFIG_XIP_KERNEL
	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
#else
	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
#endif

	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
			     PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Hmm... This should go elsewhere, but we really really need to
	 * stop things allocating the low memory; ideally we need a better
	 * implementation of GFP_DMA which does not assume that DMA-able
	 * memory starts at zero.
	 */
	if (machine_is_integrator() || machine_is_cintegrator())
		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

	/*
	 * These should likewise go elsewhere.  They pre-reserve the
	 * screen memory region at the start of main system memory.
	 */
	if (machine_is_edb7211())
		res_size = 0x00020000;
	if (machine_is_p720t())
		res_size = 0x00014000;

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
	if (res_size)
		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * function it calls: you cannot use any function or debugging method
 * which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
	BUG_ON(!vectors);

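	/*
	 * Clear each pmd entry from VMALLOC_END to the top of the
	 * address space; addr wraps around to zero after the topmost
	 * entry, which is what terminates this loop.
	 */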
	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULE_START;
	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

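	/*
	 * The low-vectors mapping reuses the same physical page and
	 * length; only the virtual address and memory type change.
	 */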
	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	void *zero_page;

	build_mem_type_table();
	prepare_page_table(mi);
	bootmem_init(mi);
	devicemaps_init(mdesc);
	top_pmd = pmd_off_k(0xffff0000);

	/*
	 * Allocate the zero page.  Note that we count on this going ok.
	 */
	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
	memzero(zero_page, PAGE_SIZE);
	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}