/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/bootmem.h>
#include <asm/kdump.h>
#include <asm/lmb.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

void reserve_kdump_trampoline(void)
{
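	/* Keep the boot memory allocator away from the low region that
	 * setup_kdump_trampoline() patches trampolines into. */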
	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
}

static void __init create_trampoline(unsigned long addr)
{
	/* The maximum range of a single instruction branch is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to branch to current address + 32 MB. So we insert a nop at
	 * the trampoline address, then the next instruction (+ 4 bytes)
	 * does a branch of (32 MB - 4). The net effect is that when we
	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
	 * two instructions it doesn't require any registers.
	 */
	create_instruction(addr, 0x60000000); /* nop */
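	/* branch from addr + 4 to addr + PHYSICAL_START, i.e. to addr + 32 MB */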
	create_branch(addr + 4, addr + PHYSICAL_START, 0);
}

void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG(" -> setup_kdump_trampoline()\n");

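	/* Patch a trampoline into every 8-byte slot of the trampoline
	 * region; each trampoline is two 4-byte instructions. */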
	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
		create_trampoline(i);
	}

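	/* The firmware-assisted NMI entry points are branched to directly
	 * by firmware, so they get trampolines of their own. */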
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);

	DBG(" <- setup_kdump_trampoline()\n");
}

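/*
 * elfcorehdr= gives the physical address of the ELF core header left
 * behind by the crashed kernel; /proc/vmcore is built from it.
 */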
#ifdef CONFIG_PROC_VMCORE
static int __init parse_elfcorehdr(char *p)
{
	if (p)
		elfcorehdr_addr = memparse(p, &p);

	return 1;
}
__setup("elfcorehdr=", parse_elfcorehdr);
#endif

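/*
 * savemaxmem= is the amount of memory (in bytes) the crashed kernel was
 * using; convert it to the index of its last page frame.
 */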
static int __init parse_savemaxmem(char *p)
{
	if (p)
		saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;

	return 1;
}
__setup("savemaxmem=", parse_savemaxmem);

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

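	/* The old kernel's page has no pte here; set up a temporary mapping. */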
	vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);

	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) {
			iounmap(vaddr);
			return -EFAULT;
		}
	} else
		memcpy(buf, (vaddr + offset), csize);

	iounmap(vaddr);
	return csize;
}