/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

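/*
 * Walk the kernel page tables for a virtual address. Returns NULL when
 * the address is not mapped. For a 2MB large page the pmd entry itself
 * is returned, cast to pte_t *.
 */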
static inline pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;
	return pte;
}

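/*
 * Replace one 2MB mapping with a page-table page covering the same range:
 * allocate a page, fill its PTRS_PER_PTE entries with 4k mappings of the
 * surrounding large page, and give only the target address 'prot' while
 * every other entry keeps 'ref_prot'.
 */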
static struct page *split_large_page(unsigned long address, pgprot_t prot,
				     pgprot_t ref_prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;

	if (!base)
		return NULL;
	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : ref_prot);
	}
	return base;
}

static void cache_flush_page(void *adr)
{
	int i;

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		asm volatile("clflush (%0)" :: "r" (adr + i));
}

static void flush_kernel_map(void *arg)
{
	struct list_head *l = (struct list_head *)arg;
	struct page *pg;

	/* When clflush is available always use it because it is
	   much cheaper than WBINVD. */
	if (!cpu_has_clflush)
		asm volatile("wbinvd" ::: "memory");
	list_for_each_entry(pg, l, lru) {
		void *adr = page_address(pg);
		if (cpu_has_clflush)
			cache_flush_page(adr);
	}
	__flush_tlb_all();
}

static inline void flush_map(struct list_head *l)
{
	on_each_cpu(flush_kernel_map, l, 1, 1);
}

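/*
 * Page-table pages whose large mapping has been restored are queued here
 * and freed only from global_flush_tlb(), after all CPUs have flushed
 * their TLBs; before that, other CPUs may still hold stale translations
 * into these pages.
 */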
static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

static inline void save_page(struct page *fpage)
{
	list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, unsigned long pfn, pgprot_t ref_prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	large_pte = pfn_pte(pfn, ref_prot);
	large_pte = pte_mkhuge(large_pte);
	set_pte((pte_t *)pmd, large_pte);
}

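/*
 * Apply 'prot' to one page of the kernel mapping. Three cases: a 4k pte
 * gets the new protection directly; a large page is first split with
 * split_large_page(); and when the last non-standard entry of a split
 * page goes back to 'ref_prot', the whole 2MB mapping is reverted and
 * the page-table page is queued for freeing.
 */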
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
		   pgprot_t ref_prot)
{
	pte_t *kpte;
	struct page *kpte_page;
	pgprot_t ref_prot2;

	kpte = lookup_address(address);
	if (!kpte)
		return 0;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
		if (!pte_huge(*kpte)) {
			set_pte(kpte, pfn_pte(pfn, prot));
		} else {
			/*
			 * split_large_page will take the reference for this
			 * change_page_attr on the split page.
			 */
			struct page *split;
			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
			split = split_large_page(pfn << PAGE_SHIFT, prot,
						 ref_prot2);
			if (!split)
				return -ENOMEM;
			set_pte(kpte, mk_pte(split, ref_prot2));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if (!pte_huge(*kpte)) {
		set_pte(kpte, pfn_pte(pfn, ref_prot));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/* on x86-64 the direct mapping set at boot is not using 4k pages */
	BUG_ON(PageReserved(kpte_page));

	if (page_private(kpte_page) == 0) {
		save_page(kpte_page);
		revert_page(address, pfn, ref_prot);
	}
	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	unsigned long phys_base_pfn = __pa_symbol(__START_KERNEL_map) >> PAGE_SHIFT;
	int err = 0, kernel_map = 0;
	int i;

	if (address >= __START_KERNEL_map
	    && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
			if (err)
				break;
		}
		/* Handle the kernel mapping too, which aliases part of
		 * the lowmem. */
		if ((pfn >= phys_base_pfn) &&
		    ((pfn - phys_base_pfn) < (KERNEL_TEXT_SIZE >> PAGE_SHIFT))) {
			unsigned long addr2;
			pgprot_t prot2;

			addr2 = __START_KERNEL_map + ((pfn - phys_base_pfn) << PAGE_SHIFT);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn, prot2,
						 PAGE_KERNEL_EXEC);
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);
	return change_page_attr_addr(addr, numpages, prot);
}

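/*
 * Typical usage, as a hedged sketch: a driver that needs an uncached
 * view of a kernel page changes the linear mapping and then flushes.
 * PAGE_KERNEL_NOCACHE is just an example protection here; any pgprot
 * differing from write-back would follow the same pattern:
 *
 *	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 *
 * and when finished, restore the default attributes:
 *
 *	change_page_attr(page, 1, PAGE_KERNEL);
 *	global_flush_tlb();
 */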
void global_flush_tlb(void)
{
	struct page *pg, *next;
	struct list_head l;

	down_read(&init_mm.mmap_sem);
	list_replace_init(&deferred_pages, &l);
	up_read(&init_mm.mmap_sem);

	flush_map(&l);

	list_for_each_entry_safe(pg, next, &l, lru) {
		ClearPagePrivate(pg);
		__free_page(pg);
	}
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);