/*
 * Suspend support specific for s390.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
 */

#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm.h>
#include <asm/system.h>

/*
 * References to section boundaries
 */
extern const void __nosave_begin, __nosave_end;

/*
 * The restore of the saved pages in a hibernation image will set
 * the change and referenced bits in the storage key for each page.
 * Overindication of the referenced bits after a hibernation cycle
 * does not cause any harm, but overindication of the change bits
 * would cause trouble.
 * Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each
 * page to the most significant byte of the associated page frame
 * number in the hibernation image.
 */
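
/*
 * Rough calling sequence of these hooks, as driven by the generic
 * hibernation core (kernel/power/snapshot.c):
 *   suspend: page_key_read() folds each storage key into the pfn
 *            written to the image.
 *   resume:  page_key_alloc() sets up the buffer list,
 *            page_key_memorize() collects the keys while the image
 *            pfns are read, page_key_write() restores them once the
 *            page contents are back in place, and page_key_free()
 *            releases the buffers.
 */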

/*
 * Key storage is allocated as a linked list of pages.
 * The size of the keys array is (PAGE_SIZE - sizeof(long))
 */
struct page_key_data {
	struct page_key_data *next;
	unsigned char data[];
};

#define PAGE_KEY_DATA_SIZE (PAGE_SIZE - sizeof(struct page_key_data *))

static struct page_key_data *page_key_data;
static struct page_key_data *page_key_rp, *page_key_wp;
static unsigned long page_key_rx, page_key_wx;
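
/*
 * page_key_wp/page_key_wx form the write cursor advanced by
 * page_key_memorize(); page_key_rp/page_key_rx form the read cursor
 * advanced by page_key_write() when the keys are put back in place.
 */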

/*
 * For each page in the hibernation image one additional byte is
 * stored in the most significant byte of the page frame number.
 * On suspend no additional memory is required but on resume the
 * keys need to be memorized until the page data has been restored.
 * Only then can the storage keys be set to their old state.
 */
unsigned long page_key_additional_pages(unsigned long pages)
{
	return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
}
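
/*
 * Worked example (assuming 4 KB pages and 8 byte pointers, i.e.
 * PAGE_KEY_DATA_SIZE == 4088): an image of 262144 pages (1 GB)
 * requires DIV_ROUND_UP(262144, 4088) == 65 additional pages for
 * the keys.
 */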

/*
 * Free page_key_data list of arrays.
 */
void page_key_free(void)
{
	struct page_key_data *pkd;

	while (page_key_data) {
		pkd = page_key_data;
		page_key_data = pkd->next;
		free_page((unsigned long) pkd);
	}
}

/*
 * Allocate page_key_data list of arrays with enough room to store
 * one byte for each page in the hibernation image.
 */
int page_key_alloc(unsigned long pages)
{
	struct page_key_data *pk;
	unsigned long size;

	size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
	while (size--) {
		pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL);
		if (!pk) {
			page_key_free();
			return -ENOMEM;
		}
		pk->next = page_key_data;
		page_key_data = pk;
	}
	page_key_rp = page_key_wp = page_key_data;
	page_key_rx = page_key_wx = 0;
	return 0;
}

/*
 * Save the storage key into the upper 8 bits of the page frame number.
 */
void page_key_read(unsigned long *pfn)
{
	unsigned long addr;

	addr = (unsigned long) page_address(pfn_to_page(*pfn));
	*(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr);
}

/*
 * Extract the storage key from the upper 8 bits of the page frame number
 * and store it in the page_key_data list of arrays.
 */
void page_key_memorize(unsigned long *pfn)
{
	page_key_wp->data[page_key_wx] = *(unsigned char *) pfn;
	*(unsigned char *) pfn = 0;
	if (++page_key_wx < PAGE_KEY_DATA_SIZE)
		return;
	page_key_wp = page_key_wp->next;
	page_key_wx = 0;
}

/*
 * Get the next key from the page_key_data list of arrays and set the
 * storage key of the page referred to by @address. If @address refers
 * to a "safe" page the swsusp_arch_resume code will transfer the
 * storage key from the buffer page to the original page.
 */
void page_key_write(void *address)
{
	page_set_storage_key((unsigned long) address,
			     page_key_rp->data[page_key_rx], 0);
	if (++page_key_rx < PAGE_KEY_DATA_SIZE)
		return;
	/* Current array is full, advance to the next page in the list. */
	page_key_rp = page_key_rp->next;
	page_key_rx = 0;
}

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
	unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));

	/* Always save lowcore pages (LC protection might be enabled). */
	if (pfn <= LC_PAGES)
		return 0;
	if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
		return 1;
	/* Skip memory holes and read-only pages (NSS, DCSS, ...). */
	if (tprot(PFN_PHYS(pfn)))
		return 1;
	return 0;
}

void save_processor_state(void)
{
	/* swsusp_arch_suspend() actually saves all cpu register contents.
	 * Machine checks must be disabled since swsusp_arch_suspend() stores
	 * register contents to their lowcore save areas. That's the same
	 * place where register contents on machine checks would be saved.
	 * To avoid register corruption disable machine checks.
	 * We must also disable machine checks in the new psw mask for
	 * program checks, since swsusp_arch_suspend() may generate program
	 * checks. Disabling machine checks for all other new psw masks is
	 * just paranoia.
	 */
	local_mcck_disable();
	/* Disable lowcore protection */
	__ctl_clear_bit(0, 28);
	S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
	S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
	S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
	S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
}

void restore_processor_state(void)
{
	S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
	/* Enable lowcore protection */
	__ctl_set_bit(0, 28);
	local_mcck_enable();
}