#ifndef _I386_PGTABLE_3LEVEL_H
#define _I386_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

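/*
 * With PAE, each page table entry is 64 bits wide and is stored as two
 * 32-bit halves (pte_low and pte_high).  The hardware page walker can
 * read an entry at any time, so the set/clear helpers below are careful
 * about the order in which the two halves are written.
 */
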
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == 0;
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) & _PAGE_PRESENT;
}

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use pte_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
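	/* Write the high half first; the present bit lives in pte_low, so
	 * the walker never sees a present entry with a stale high half. */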
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

/*
 * Since this is only called on user PTEs, and the page fault handler
 * must handle the already racy situation of simultaneous page faults,
 * we are justified in merely clearing the PTE present bit, followed
 * by a set.  The ordering here is important.
 */
static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr,
					  pte_t *ptep, pte_t pte)
{
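	/* Drop the present bit first, then install the new halves. */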
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}
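
/*
 * The three helpers above rely on set_64bit() to store both 32-bit
 * halves with a single atomic 64-bit write, so they are safe even when
 * the entry being replaced is live and present.
 */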

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
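	/* Low word first - it holds the present bit (see comment above). */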
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * In principle we need to do a cr3 reload here to make sure
	 * the processor recognizes the changed pgd.  In practice, all
	 * the places where pud_clear() gets called are followed by
	 * full tlb flushes anyway, so we can defer the cost here.
	 *
	 * Specifically:
	 *
	 * mm/memory.c:free_pmd_range() - immediately after the
	 * pud_clear() it does a pmd_free_tlb().  We change the
	 * mmu_gather structure to do a full tlb flush (which has the
	 * effect of reloading cr3) when the pagetable free is
	 * complete.
	 *
	 * arch/x86/mm/hugetlbpage.c:huge_pmd_unshare() - the call to
	 * this is followed by a flush_tlb_range, which on x86 does a
	 * full tlb flush.
	 */
}

#define pud_page(pud) \
	((struct page *) __va(pud_val(pud) & PAGE_MASK))

#define pud_page_vaddr(pud) \
	((unsigned long) __va(pud_val(pud) & PAGE_MASK))

/* Find an entry in the second-level page table. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
				  pmd_index(address))

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp)	native_local_ptep_get_and_clear(xp)
#endif
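
/*
 * In the SMP version above, the xchg() atomically clears pte_low, which
 * both returns the authoritative low half (including any accessed/dirty
 * bits set by hardware) and clears the present bit, so the high half
 * can then be read and cleared separately.  On UP there is no such
 * race and the plain local variant is enough.
 */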

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
	return !pte.pte_low && !pte.pte_high;
}

static inline unsigned long pte_pfn(pte_t pte)
{
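	/* Mask off the NX bit (bit 63) so it does not leak into the pfn. */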
	return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT;
}

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
 */
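/*
 * A file pte keeps only _PAGE_FILE in pte_low, so the hardware never
 * sees it as present.
 */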
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
#define PTE_FILE_MAX_BITS	32

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
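
/*
 * Swap entries likewise live entirely in pte_high (5 bits of type, the
 * rest offset); pte_low stays zero, so a swap pte is also non-present.
 */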

#endif /* _I386_PGTABLE_3LEVEL_H */