/*
 *	kernel/crash_dump.c - Memory preserving reboot related code.
 *
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 */

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

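/*
 * Bounce page used to stage data for copy_to_user() once the atomic
 * mapping of the old page has been dropped.
 */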
static void *kdump_buf_page;

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 *
 * Calling copy_to_user() in atomic context is not desirable, so the data
 * is first copied to a pre-allocated kernel page and then copied to user
 * space in non-atomic context.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                               size_t csize, unsigned long offset, int userbuf)
{
	void  *vaddr;

	if (!csize)
		return 0;

	vaddr = kmap_atomic_pfn(pfn, KM_PTE0);

	if (!userbuf) {
		memcpy(buf, (vaddr + offset), csize);
		kunmap_atomic(vaddr, KM_PTE0);
	} else {
		if (!kdump_buf_page) {
			printk(KERN_WARNING "Kdump: Kdump buffer page not"
				" allocated\n");
			kunmap_atomic(vaddr, KM_PTE0);
			return -EFAULT;
		}
		copy_page(kdump_buf_page, vaddr);
		kunmap_atomic(vaddr, KM_PTE0);
		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
			return -EFAULT;
	}

	return csize;
}

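/*
 * Pre-allocate the bounce page at boot (arch_initcall) so that it is
 * available by the time copy_oldmem_page() is first asked to copy into
 * user space.
 */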
static int __init kdump_buf_page_init(void)
{
	int ret = 0;

	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!kdump_buf_page) {
		printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
			" page\n");
		ret = -ENOMEM;
	}

	return ret;
}
arch_initcall(kdump_buf_page_init);
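
/*
 * Illustrative sketch (not part of the original file): how a consumer of
 * the old kernel's memory, such as the /proc/vmcore read path, might drive
 * copy_oldmem_page() when a request spans page boundaries.  The name
 * example_read_oldmem and its exact signature are hypothetical.
 */
#if 0
static ssize_t example_read_oldmem(char *buf, size_t count, u64 *ppos,
				   int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	while (count) {
		/* Copy at most up to the end of the current page. */
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;

		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;	/* subsequent pages are read from their start */
	}

	return read;
}
#endif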