/*
 * arch/s390/mm/vmem.c
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

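/*
 * Allocate 2^order pages: from the buddy allocator once slab is available,
 * from bootmem during early boot. __ref: may legitimately reference the
 * __init bootmem allocator.
 */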
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

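/*
 * Allocate and clear a region-third (pud) table. Only needed on 64 bit;
 * on 31 bit the upper page table levels are folded into the pgd.
 */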
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

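/*
 * Allocate and clear a segment (pmd) table. As above, only needed on 64 bit.
 */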
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

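/*
 * Allocate a page table and initialize all entries to invalid; the kernel
 * page table allocator is used once slab is up, bootmem otherwise.
 */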
static pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	if (MACHINE_HAS_HPAGE)
		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
			    PTRS_PER_PTE * sizeof(pte_t));
	else
		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
			    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

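		/*
		 * Use a large (segment level) page when the machine supports
		 * it and a full, aligned huge page fits into the remaining
		 * range; the first huge page is always mapped with 4K pages.
		 */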
#ifdef __s390x__
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
					_SEGMENT_ENTRY_CO;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

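		/* Large page mappings are invalidated at the segment level. */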
		if (pmd_huge(*pm_dir)) {
			pmd_clear_kernel(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
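	/* Make sure the newly backed mem_map range starts out zeroed. */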
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

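/*
 * Unregister a memory segment previously added with vmem_add_mapping()
 * and invalidate its 1:1 mapping.
 */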
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

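/*
 * Register an additional memory segment and create a writable 1:1
 * mapping for it.
 */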
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	spin_lock_init(&init_mm.context.list_lock);
	INIT_LIST_HEAD(&init_mm.context.crst_list);
	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
	init_mm.context.noexec = 0;
	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
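	/*
	 * Map the range from _stext to _eshared read-only and everything
	 * else read/write; memory chunks that straddle the boundary are
	 * split into separate mappings.
	 */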
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);