/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/bootmem.h>
#include <asm/kdump.h>
#include <asm/lmb.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

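/*
 * Reserve the low memory region that will be overwritten with
 * trampolines, so nothing else is allocated there before
 * setup_kdump_trampoline() runs.
 */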
void reserve_kdump_trampoline(void)
{
	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
}

static void __init create_trampoline(unsigned long addr)
{
	/* The maximum range of a single-instruction branch is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to branch to current address + 32 MB. So we insert a nop at
	 * the trampoline address, then the next instruction (+ 4 bytes)
	 * does a branch of (32 MB - 4) bytes. The net effect is that when we
	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
	 * two instructions it doesn't require any registers.
	 */
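	/* Illustrative layout (assuming PHYSICAL_START is 32 MB, as it is
	 * for a kdump kernel):
	 *
	 *   addr + 0:  nop                 ; falls through
	 *   addr + 4:  b  (addr + 32 MB)   ; a branch of (32 MB - 4) bytes
	 */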
	create_instruction(addr, 0x60000000); /* nop */
	create_branch(addr + 4, addr + PHYSICAL_START, 0);
}

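/*
 * Tile the trampoline region with two-instruction trampolines (hence
 * the 8-byte stride), so an exception taken through the old kernel's
 * low-memory vectors is redirected into the kdump kernel's copy of
 * the vectors at PHYSICAL_START.
 */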
void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG(" -> setup_kdump_trampoline()\n");

	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
		create_trampoline(i);
	}

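	/*
	 * Firmware delivers FWNMI system resets and machine checks to
	 * the addresses the crashed kernel registered, so those entry
	 * points get explicit trampolines of their own.
	 */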
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);

	DBG(" <- setup_kdump_trampoline()\n");
}

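/*
 * elfcorehdr= specifies the location of the ELF core header left
 * behind by the crashed kernel; the kexec loader passes it on the
 * capture kernel's command line.
 */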
#ifdef CONFIG_PROC_VMCORE
static int __init parse_elfcorehdr(char *p)
{
	if (p)
		elfcorehdr_addr = memparse(p, &p);

	return 1;
}
__setup("elfcorehdr=", parse_elfcorehdr);
#endif

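/*
 * savemaxmem= gives the amount of memory the crashed kernel was
 * using; saved_max_pfn records its highest page frame number so the
 * dump tools know how much of "oldmem" can be read.
 */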
static int __init parse_savemaxmem(char *p)
{
	if (p)
		saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;

	return 1;
}
__setup("savemaxmem=", parse_savemaxmem);

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *      space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *      otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We map it with __ioremap(), which stitches
 * up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	/* Create a temporary mapping for the old kernel's page. */
	vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
	if (!vaddr)
		return -ENOMEM;

	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) {
			iounmap(vaddr);
			return -EFAULT;
		}
	} else
		memcpy(buf, (vaddr + offset), csize);

	iounmap(vaddr);
	return csize;
}