/*
 * Copyright IBM Corp. 2008
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>

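/*
 * Fast path: under z/VM, diag 0x260 with subcode 0xc (passed in the
 * second operand register) returns the address of the highest
 * addressable byte of the guest, so the whole layout collapses into a
 * single chunk. The result is expected in both output registers; if
 * they disagree, or if the diag raises a program check (caught by the
 * EX_TABLE entry, leaving rc at -EOPNOTSUPP), the caller falls back to
 * the tprot scan. A kernel IPLed from a named saved system skips the
 * fast path entirely, presumably because an NSS layout may contain
 * holes that only the tprot scan can find.
 */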
static int memory_fast_detect(struct mem_chunk *chunk)
{
	unsigned long val0 = 0;
	unsigned long val1 = 0xc;
	int rc = -EOPNOTSUPP;

	if (ipl_flags & IPL_NSS_VALID)
		return -EOPNOTSUPP;
	asm volatile(
		"	diag	%1,%2,0x260\n"
		"0:	lhi	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc), "+d" (val0), "+d" (val1) : : "cc");

	if (rc || val0 != val1)
		return -EOPNOTSUPP;
	chunk->size = val0 + 1;
	return 0;
}

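/*
 * TEST PROTECTION reports the access state of one address in the
 * condition code: 0 - fetch and store permitted, 1 - fetch permitted
 * but store protected, 2 - neither permitted, 3 - translation not
 * available. ipm/srl extract the condition code into the low bits of
 * rc. Probing beyond the configured storage raises an addressing
 * exception, which the EX_TABLE entry catches so that -EFAULT is
 * returned instead.
 */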
static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}

#define ADDR2G (1ULL << 31)

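/*
 * Probe storage in steps of rzm, the storage increment size reported
 * by SCLP (with a 128KB fallback), and merge consecutive increments
 * that report the same tprot result into one chunk. Only accessible
 * chunks are recorded; the type comparison relies on CHUNK_READ_WRITE
 * and CHUNK_READ_ONLY matching condition codes 0 and 1. rzm * rnmax
 * gives the maximum possible amount of storage; on 31 bit both the
 * increment size and the scan limit are clamped to 2GB.
 */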
static void find_memory_chunks(struct mem_chunk chunk[])
{
	unsigned long long memsize, rnmax, rzm;
	unsigned long addr = 0, size;
	int i = 0, type;

	rzm = sclp_get_rzm();
	rnmax = sclp_get_rnmax();
	memsize = rzm * rnmax;
	if (!rzm)
		rzm = 1ULL << 17;
	if (sizeof(long) == 4) {
		rzm = min(ADDR2G, rzm);
		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
	}
	do {
		size = 0;
		type = tprot(addr);
		do {
			size += rzm;
			if (memsize && addr + size >= memsize)
				break;
		} while (type == tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			chunk[i].addr = addr;
			chunk[i].size = size;
			chunk[i].type = type;
			i++;
		}
		addr += size;
	} while (addr < memsize && i < MEMORY_CHUNKS);
}

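/**
 * detect_memory_layout - detect the installed memory chunks
 * @chunk: array of MEMORY_CHUNKS struct mem_chunk entries to be filled
 *
 * The array is zeroed first and then filled in, either via the diag
 * 0x260 fast path or via the tprot scan. The scan runs with
 * interrupts, DAT and low address protection disabled; the previous
 * state is restored before returning.
 */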
void detect_memory_layout(struct mem_chunk chunk[])
{
	unsigned long flags, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/* Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
	flags = __raw_local_irq_stnsm(0xf8);
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);
	find_memory_chunks(chunk);
	__ctl_load(cr0, 0, 0);
	__raw_local_irq_ssm(flags);
}
EXPORT_SYMBOL(detect_memory_layout);
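
/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * provides a MEMORY_CHUNKS-sized array and walks the filled entries.
 * Since detect_memory_layout() zeroes the array first, a chunk with
 * size 0 marks the end of the detected layout.
 *
 *	struct mem_chunk chunks[MEMORY_CHUNKS];
 *	int i;
 *
 *	detect_memory_layout(chunks);
 *	for (i = 0; i < MEMORY_CHUNKS && chunks[i].size; i++)
 *		printk(KERN_INFO "chunk %d: %lx-%lx\n", i,
 *		       chunks[i].addr,
 *		       chunks[i].addr + chunks[i].size - 1);
 */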