/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifndef CONFIG_RELOCATABLE
void __init reserve_kdump_trampoline(void)
{
	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}

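/*
 * A kdump kernel executes at PHYSICAL_START (32 MB), but exceptions
 * still vector into the first 32 MB of real memory. create_trampoline()
 * patches a two-instruction stub over a low-memory address so that
 * anything landing there is bounced up into the kdump kernel's copy
 * of the same code.
 */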
static void __init create_trampoline(unsigned long addr)
{
	unsigned int *p = (unsigned int *)addr;

	/* The maximum range of a single instruction branch is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to branch to current address + 32 MB. So we insert a nop at
	 * the trampoline address, then the next instruction (+ 4 bytes)
	 * does a forward branch of (32 MB - 4) bytes. The net effect is that
	 * when we branch to "addr" we jump to ("addr" + 32 MB). Although it
	 * requires two instructions, it doesn't require any registers.
	 */
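	/*
	 * Worked example (assuming PHYSICAL_START == 0x2000000, i.e. 32 MB):
	 * for addr == 0x100 this emits
	 *
	 *   0x100:  nop
	 *   0x104:  b    +0x01fffffc    (0x104 + 0x1fffffc == 0x2000100)
	 *
	 * i.e. a jump to addr + PHYSICAL_START using the maximum forward
	 * branch displacement.
	 */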
	patch_instruction(p, PPC_INST_NOP);
	patch_branch(++p, addr + PHYSICAL_START, 0);
}

void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG(" -> setup_kdump_trampoline()\n");

	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
		create_trampoline(i);
	}

#ifdef CONFIG_PPC_PSERIES
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG(" <- setup_kdump_trampoline()\n");
}
#endif /* CONFIG_RELOCATABLE */

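/*
 * "savemaxmem=" is passed on the kdump kernel's command line to describe
 * how much memory the crashed kernel had. The value is a memparse() size
 * string; for example (with 4K pages), savemaxmem=2G gives
 * saved_max_pfn = (0x80000000 >> 12) - 1 = 0x7ffff.
 */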
static int __init parse_savemaxmem(char *p)
{
	if (p)
		saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;

	return 1;
}
__setup("savemaxmem=", parse_savemaxmem);

static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
				unsigned long offset, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
			return -EFAULT;
	} else
		memcpy(buf, (vaddr + offset), csize);

	return csize;
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void  *vaddr;

	if (!csize)
		return 0;

	csize = min_t(size_t, csize, PAGE_SIZE);

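	/*
	 * Pages between min_low_pfn and max_pfn are covered by the
	 * kernel's linear mapping and can be reached directly via
	 * __va(); anything outside that range must be temporarily
	 * mapped with __ioremap() first.
	 */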
	if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
		vaddr = __va(pfn << PAGE_SHIFT);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
	} else {
		vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
		iounmap(vaddr);
	}

	return csize;
}

#ifdef CONFIG_PPC_RTAS
/*
 * The crashkernel region will almost always overlap the RTAS region, so
 * we have to be careful when shrinking the crashkernel region.
 */
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;
	const u32 *basep, *sizep;
	unsigned int rtas_start = 0, rtas_end = 0;

	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
	sizep = of_get_property(rtas.dev, "rtas-size", NULL);

	if (basep && sizep) {
		rtas_start = *basep;
		rtas_end = *basep + *sizep;
	}

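	/*
	 * Walk the region page by page: skip any page that overlaps
	 * the RTAS region, and hand every other page back to the page
	 * allocator (clear the reserved bit, reset the refcount, free
	 * the page and account for it).
	 */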
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/* Does this page overlap with the RTAS region? */
		if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
			continue;

		ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
		init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
		free_page((unsigned long)__va(addr));
		totalram_pages++;
	}
}
#endif