/*
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>

#define ADDR2G (1ULL << 31)

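/*
 * Probe memory with the tprot instruction in steps of one storage
 * increment (rzm bytes, as reported by SCLP) and merge consecutive
 * increments with the same protection type into a single mem_chunk.
 */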
static void find_memory_chunks(struct mem_chunk chunk[], unsigned long maxsize)
{
	unsigned long long memsize, rnmax, rzm;
	unsigned long addr = 0, size;
	int i = 0, type;

	rzm = sclp_get_rzm();
	rnmax = sclp_get_rnmax();
	memsize = rzm * rnmax;
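	/* If SCLP reports no increment size, assume 128KB increments. */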
	if (!rzm)
		rzm = 1ULL << 17;
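	/*
	 * A 31 bit kernel (sizeof(long) == 4) can address at most 2GB,
	 * so limit both the increment size and the maximum detected
	 * memory size accordingly.
	 */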
	if (sizeof(long) == 4) {
		rzm = min(ADDR2G, rzm);
		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
	}
	if (maxsize)
		memsize = memsize ? min((unsigned long)memsize, maxsize) : maxsize;
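	/*
	 * Outer loop: one pass per contiguous same-type region.
	 * Inner loop: grow the region while tprot reports the same type
	 * for the next increment. Only read-write and read-only regions
	 * are recorded as chunks.
	 */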
	do {
		size = 0;
		type = tprot(addr);
		do {
			size += rzm;
			if (memsize && addr + size >= memsize)
				break;
		} while (type == tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			if (memsize && (addr + size > memsize))
				size = memsize - addr;
			chunk[i].addr = addr;
			chunk[i].size = size;
			chunk[i].type = type;
			i++;
		}
		addr += size;
	} while (addr < memsize && i < MEMORY_CHUNKS);
}

/**
 * detect_memory_layout - fill mem_chunk array with memory layout data
 * @chunk: mem_chunk array to be filled
 * @maxsize: maximum address where memory detection should stop
 *
 * Fills the passed in memory chunk array with the memory layout of the
 * machine. The array must have a size of at least MEMORY_CHUNKS and will
 * be fully initialized afterwards.
 * If the maxsize parameter has a value > 0, memory detection stops at
 * that address. It is guaranteed that all chunks have an ending address
 * that is smaller than maxsize.
 * If maxsize is 0 all memory will be detected.
 */
void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize)
{
	unsigned long flags, flags_dat, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/*
	 * Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
	local_irq_save(flags);
	flags_dat = __arch_local_irq_stnsm(0xfb);
	/*
	 * In case DAT was enabled, make sure chunk doesn't reside in vmalloc
	 * space. We have disabled DAT and any access to vmalloc area will
	 * cause an exception.
	 * If DAT was disabled we are called from early ipl code.
	 */
	if (test_bit(5, &flags_dat)) {
		if (WARN_ON_ONCE(is_vmalloc_or_module_addr(chunk)))
			goto out;
	}
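	/*
	 * Clear the low address protection bit in control register 0
	 * while scanning; the previous value is restored afterwards.
	 */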
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);
	find_memory_chunks(chunk, maxsize);
	__ctl_load(cr0, 0, 0);
out:
	__arch_local_irq_ssm(flags_dat);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(detect_memory_layout);
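
/*
 * Illustrative usage sketch (an assumption for documentation purposes,
 * not copied from an in-tree caller). Passing maxsize == 0 detects all
 * available memory:
 *
 *	struct mem_chunk chunks[MEMORY_CHUNKS];
 *
 *	detect_memory_layout(chunks, 0);
 */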

/*
 * Move the tail of the memory chunk array from index "from" to index "to"
 */
static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
{
	int cnt = MEMORY_CHUNKS - to;

	memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
}

/*
 * Initialize memory chunk
 */
static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
			   unsigned long size, int type)
{
	chunk->type = type;
	chunk->addr = addr;
	chunk->size = size;
}

/*
 * Create memory hole with given address, size, and type
 */
void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
		     unsigned long size, int type)
{
	unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
	int i, ch_type;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (chunk[i].size == 0)
			continue;

		/* Define chunk properties */
		ch_start = chunk[i].addr;
		ch_size = chunk[i].size;
		ch_end = ch_start + ch_size - 1;
		ch_type = chunk[i].type;

		/* Is memory chunk hit by memory hole? */
		if (addr + size <= ch_start)
			continue; /* No: memory hole in front of chunk */
		if (addr > ch_end)
			continue; /* No: memory hole after chunk */

		/* Yes: Define local hole properties */
		lh_start = max(addr, chunk[i].addr);
		lh_end = min(addr + size - 1, ch_end);
		lh_size = lh_end - lh_start + 1;

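		/*
		 * Four possible overlaps: the hole covers the whole
		 * chunk, covers its end, covers its start, or lies in
		 * the middle and splits the chunk in two.
		 */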
		if (lh_start == ch_start && lh_end == ch_end) {
			/* Hole covers complete memory chunk */
			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
		} else if (lh_end == ch_end) {
			/* Hole starts in memory chunk and covers chunk end */
			mem_chunk_move(chunk, i + 1, i);
			mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size,
				       ch_type);
			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
			i += 1;
		} else if (lh_start == ch_start) {
			/* Hole ends in memory chunk */
			mem_chunk_move(chunk, i + 1, i);
			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
			mem_chunk_init(&chunk[i + 1], lh_end + 1,
				       ch_size - lh_size, ch_type);
			break;
		} else {
			/* Hole splits memory chunk */
			mem_chunk_move(chunk, i + 2, i);
			mem_chunk_init(&chunk[i], ch_start,
				       lh_start - ch_start, ch_type);
			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
			mem_chunk_init(&chunk[i + 2], lh_end + 1,
				       ch_end - lh_end, ch_type);
			break;
		}
	}
}
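
/*
 * Illustrative usage sketch (hypothetical values, for documentation
 * purposes only; hole_addr, hole_size, and hole_type are placeholders,
 * with hole_type being one of the CHUNK_* constants from asm/setup.h):
 *
 *	struct mem_chunk chunks[MEMORY_CHUNKS];
 *
 *	detect_memory_layout(chunks, 0);
 *	create_mem_hole(chunks, hole_addr, hole_size, hole_type);
 */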