/*
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>

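/* 2 GB: the highest address reachable in 31-bit addressing mode */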
#define ADDR2G (1ULL << 31)

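/*
 * Probe storage with the tprot instruction, stepping in storage increment
 * (rzm) units as reported by SCLP, and merge consecutive increments that
 * return the same access type into one memory chunk. Scanning stops at the
 * reported memory size or when the chunk array is full.
 */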
static void find_memory_chunks(struct mem_chunk chunk[])
{
	unsigned long long memsize, rnmax, rzm;
	unsigned long addr = 0, size;
	int i = 0, type;

	rzm = sclp_get_rzm();
	rnmax = sclp_get_rnmax();
	memsize = rzm * rnmax;
	if (!rzm)
		rzm = 1ULL << 17;
	if (sizeof(long) == 4) {
		rzm = min(ADDR2G, rzm);
		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
	}
	do {
		size = 0;
		type = tprot(addr);
		do {
			size += rzm;
			if (memsize && addr + size >= memsize)
				break;
		} while (type == tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			chunk[i].addr = addr;
			chunk[i].size = size;
			chunk[i].type = type;
			i++;
		}
		addr += size;
	} while (addr < memsize && i < MEMORY_CHUNKS);
}

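/*
 * Detect the machine's memory layout and store it in the given chunk
 * array, which must hold MEMORY_CHUNKS entries.
 */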
void detect_memory_layout(struct mem_chunk chunk[])
{
	unsigned long flags, flags_dat, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/* Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
	local_irq_save(flags);
	flags_dat = __arch_local_irq_stnsm(0xfb);	/* AND system mask with 0xfb: DAT off */
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);		/* CR0 bit 28: low address protection */
	find_memory_chunks(chunk);
	__ctl_load(cr0, 0, 0);
	__arch_local_irq_ssm(flags_dat);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(detect_memory_layout);
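
/*
 * Typical use (sketch): the caller supplies a MEMORY_CHUNKS-sized array,
 * lets detect_memory_layout() fill it, then walks it until an entry with
 * size == 0 marks the end:
 *
 *	struct mem_chunk chunks[MEMORY_CHUNKS];
 *	int i;
 *
 *	detect_memory_layout(chunks);
 *	for (i = 0; i < MEMORY_CHUNKS && chunks[i].size; i++)
 *		pr_info("chunk at %lx, size %lx, type %d\n",
 *			chunks[i].addr, chunks[i].size, chunks[i].type);
 */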

/*
 * Move memory chunk array entries from index "from" to index "to";
 * memmove() copes with the overlapping ranges.
 */
static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
{
	int cnt = MEMORY_CHUNKS - to;

	memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
}

/*
 * Initialize memory chunk
 */
static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
			   unsigned long size, int type)
{
	chunk->type = type;
	chunk->addr = addr;
	chunk->size = size;
}

/*
 * Create memory hole with given address, size, and type
 */
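/*
 * Worked example (hypothetical values): given a single READ_WRITE chunk
 * covering [0, 1 GB), the call
 *
 *	create_mem_hole(chunk, 0x10000000, 0x10000000, CHUNK_OLDMEM);
 *
 * punches a 256 MB hole at address 256 MB and leaves three entries:
 * [0, 256 MB) READ_WRITE, [256 MB, 512 MB) OLDMEM and
 * [512 MB, 1 GB) READ_WRITE.
 */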
void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
		     unsigned long size, int type)
{
	unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
	int i, ch_type;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (chunk[i].size == 0)
			continue;

		/* Define chunk properties */
		ch_start = chunk[i].addr;
		ch_size = chunk[i].size;
		ch_end = ch_start + ch_size - 1;
		ch_type = chunk[i].type;

		/* Is memory chunk hit by memory hole? */
		if (addr + size <= ch_start)
			continue; /* No: memory hole in front of chunk */
		if (addr > ch_end)
			continue; /* No: memory hole after chunk */

		/* Yes: Define local hole properties */
		lh_start = max(addr, chunk[i].addr);
		lh_end = min(addr + size - 1, ch_end);
		lh_size = lh_end - lh_start + 1;

		if (lh_start == ch_start && lh_end == ch_end) {
			/* Hole covers complete memory chunk */
			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
		} else if (lh_end == ch_end) {
			/* Hole starts in memory chunk and covers chunk end */
			mem_chunk_move(chunk, i + 1, i);
			mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size,
				       ch_type);
			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
			i += 1;
		} else if (lh_start == ch_start) {
			/* Hole ends in memory chunk */
			mem_chunk_move(chunk, i + 1, i);
			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
			mem_chunk_init(&chunk[i + 1], lh_end + 1,
				       ch_size - lh_size, ch_type);
			break;
		} else {
			/* Hole splits memory chunk */
			mem_chunk_move(chunk, i + 2, i);
			mem_chunk_init(&chunk[i], ch_start,
				       lh_start - ch_start, ch_type);
			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
			mem_chunk_init(&chunk[i + 2], lh_end + 1,
				       ch_end - lh_end, ch_type);
			break;
		}
	}
}