#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define USER_PTRS_PER_PGD	((TASK_SIZE-1)/PGDIR_SIZE+1)
#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0
#define _PAGE_BIT_RW		1
#define _PAGE_BIT_USER		2
#define _PAGE_BIT_PWT		3
#define _PAGE_BIT_PCD		4
#define _PAGE_BIT_ACCESSED	5
#define _PAGE_BIT_DIRTY		6
#define _PAGE_BIT_FILE		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2 MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

/*
 * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a
 * sign-extended value on 32-bit with all 1's in the upper word,
 * which preserves the upper pte values on 64-bit ptes:
 */
#define _PAGE_PRESENT	(_AC(1, L)<<_PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AC(1, L)<<_PAGE_BIT_RW)
#define _PAGE_USER	(_AC(1, L)<<_PAGE_BIT_USER)
#define _PAGE_PWT	(_AC(1, L)<<_PAGE_BIT_PWT)
#define _PAGE_PCD	(_AC(1, L)<<_PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AC(1, L)<<_PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AC(1, L)<<_PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AC(1, L)<<_PAGE_BIT_PSE)	/* 2MB page */
#define _PAGE_GLOBAL	(_AC(1, L)<<_PAGE_BIT_GLOBAL)	/* Global TLB entry */
#define _PAGE_UNUSED1	(_AC(1, L)<<_PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2	(_AC(1, L)<<_PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3	(_AC(1, L)<<_PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AC(1, L)<<_PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AC(1, L)<<_PAGE_BIT_PAT_LARGE)

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AC(1, ULL) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	0
#endif

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with
					   PROT_NONE; pte_present gives true */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
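
/*
 * Note (illustrative, not from the original header): without PAT, the
 * PWT/PCD pair selects among the four legacy memory types, so
 * _PAGE_CACHE_WC == _PAGE_PWT relies on the kernel reprogramming the
 * PAT MSR so that its entry 1 means write-combining; when PAT is
 * unusable, write-combining requests are expected to fall back to
 * UC_MINUS.
 */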

#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#ifdef CONFIG_X86_32
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL	(_PAGE_KERNEL_EXEC | _PAGE_NX)

#ifndef __ASSEMBLY__
extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#endif	/* __ASSEMBLY__ */
#else
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL	(__PAGE_KERNEL_EXEC | _PAGE_NX)
#endif

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#ifdef CONFIG_X86_32
# define MAKE_GLOBAL(x)		__pgprot((x))
#else
# define MAKE_GLOBAL(x)		__pgprot((x) | _PAGE_GLOBAL)
#endif
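
/*
 * Note (illustrative): global mappings are not flushed from the TLB
 * on a CR3 reload, which is what the kernel wants for its own fixed
 * mappings. 64-bit CPUs always have PGE, so _PAGE_GLOBAL can be
 * applied unconditionally; on 32-bit the bit is instead folded into
 * the runtime __PAGE_KERNEL variables once PGE support is detected.
 */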

#define PAGE_KERNEL			MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			MAKE_GLOBAL(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			MAKE_GLOBAL(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)

/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

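/*
 * Example (illustrative): the __P/__S tables are indexed by the xwr
 * bits of a mmap() request; __P is for MAP_PRIVATE, __S for
 * MAP_SHARED. PROT_READ|PROT_WRITE picks PAGE_SHARED via __S011 but
 * only PAGE_COPY via __P011, so a write to a private mapping faults
 * and is satisfied by copy-on-write rather than by writing the
 * shared page.
 */
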
#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_PSE; }
static inline int pte_global(pte_t pte)	{ return pte_val(pte) & _PAGE_GLOBAL; }
static inline int pte_exec(pte_t pte)	{ return !(pte_val(pte) & _PAGE_NX); }

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}
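
/*
 * Note (illustrative): pmd_large() tests _PAGE_PRESENT as well as
 * _PAGE_PSE so that a non-present entry whose bit 7 happens to be set
 * (the bit doubles as _PAGE_PROTNONE) is not mistaken for a huge
 * mapping.
 */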

static inline pte_t pte_mkclean(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); }
static inline pte_t pte_mkold(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); }
static inline pte_t pte_wrprotect(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); }
static inline pte_t pte_mkexec(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); }
static inline pte_t pte_mkdirty(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_DIRTY); }
static inline pte_t pte_mkyoung(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_ACCESSED); }
static inline pte_t pte_mkwrite(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_RW); }
static inline pte_t pte_mkhuge(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_PSE); }
static inline pte_t pte_clrhuge(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); }
static inline pte_t pte_mkglobal(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_GLOBAL); }
static inline pte_t pte_clrglobal(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); }

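/*
 * Note (illustrative): the ~(pteval_t) casts above force the
 * complement to be taken at full pte width, so on 32-bit PAE (64-bit
 * pteval_t) the upper half of the pte survives the mask regardless of
 * how the flag constant itself is typed.
 */
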
extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

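/*
 * Example usage (illustrative):
 *
 *	pte_t pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 *
 * The mask with __supported_pte_mask strips flags the CPU cannot
 * honour, e.g. _PAGE_NX on processors without NX support.
 */
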
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK & ~_PAGE_NX;
	val |= pgprot_val(newprot) & __supported_pte_mask;

	return __pte(val);
}
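
/*
 * Example (illustrative): mprotect() uses this to change protections
 * without disturbing the page frame or the accessed/dirty state:
 *
 *	ptent = pte_modify(ptent, newprot);
 *
 * _PAGE_CHG_MASK keeps the pfn plus _PAGE_ACCESSED/_PAGE_DIRTY;
 * everything else comes from the new pgprot.
 */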

#define pte_pgprot(x)	__pgprot(pte_val(x) & (0xfff | _PAGE_NX))

#define canon_pgprot(p)	__pgprot(pgprot_val(p) & __supported_pte_mask)

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif	/* CONFIG_PARAVIRT */
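
/*
 * Note (illustrative): with CONFIG_PARAVIRT the same names are
 * provided by <asm/paravirt.h> as indirect calls, so a hypervisor
 * can intercept page table updates; the native_* functions remain
 * the direct-to-hardware implementations those hooks default to.
 */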

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
};

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);

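/*
 * Example usage (illustrative):
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(address, &level);
 *
 * A non-NULL result with level == PG_LEVEL_2M means 'address' is
 * covered by a 2MB mapping and the returned pte is really the pmd.
 */
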
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to ensure that the flush happens while
 * still holding the same page table lock, so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
({									\
	int __changed = !pte_same(*(ptep), entry);			\
	if (__changed && dirty) {					\
		*ptep = entry;						\
		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
		flush_tlb_page(vma, address);				\
	}								\
	__changed;							\
})
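
/*
 * Note (illustrative): ptep_set_access_flags() is called from the
 * fault path when the kernel upgrades an entry by hand (e.g. setting
 * the dirty bit on a write fault); the pte_same() check means the TLB
 * is only flushed when the entry actually changed.
 */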

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
	int __ret = 0;							\
	if (pte_young(*(ptep)))						\
		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
					   &(ptep)->pte);		\
	if (__ret)							\
		pte_update((vma)->vm_mm, addr, ptep);			\
	__ret;								\
})

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep)			\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
	if (__young)							\
		flush_tlb_page(vma, address);				\
	__young;							\
})
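
/*
 * Note (illustrative): ptep_clear_flush_young() is what page reclaim
 * uses to age pages: the accessed bit is test-and-cleared and the TLB
 * entry flushed, so the next reference sets the bit again in hardware
 * and marks the page young.
 */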

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}
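
/*
 * Note (illustrative): the 'full' case covers the teardown of an
 * entire address space (e.g. process exit). Nothing else can be using
 * the page tables then, so the cheaper non-atomic local clear is safe
 * and no paravirt update is needed.
 */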

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */