#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif
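
/*
 * Illustrative sketch (not part of this header): page reclaim uses
 * ptep_test_and_clear_young() to sample and reset the accessed bit.
 * With the pte lock held, a caller might age a mapping roughly like
 * this; the locking helpers are assumed from elsewhere in mm:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	int referenced = ptep_test_and_clear_young(vma, address, pte);
 *	pte_unmap_unlock(pte, ptl);
 *	if (referenced)
 *		;	// treat the page as recently used
 */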

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	pmd_clear(mm, address, pmdp);
	return pmd;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	BUG();
	return __pmd(0);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif
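
/*
 * Illustrative sketch (not part of this header): an exit-time zap loop
 * already holds the pte lock and knows the whole address space is going
 * away, so it can pass full != 0 and take the cheaper path for entries
 * that are not present. Roughly, following mm/memory.c's zap path:
 *
 *	if (pte_none(ptent))
 *		continue;
 *	if (!pte_present(ptent)) {
 *		// ... bookkeeping for the swap entry ...
 *		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 *	} else {
 *		ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	}
 *
 * The names (tlb->fullmm, ptent) follow that code but are only a sketch
 * of the call site, not a copy of it.
 */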

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
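
/*
 * Illustrative sketch (not part of this header): fork() write-protects
 * COW-able pages in both parent and child so that the first write
 * faults and triggers copy-on-write. mm/memory.c's pte copy loop does,
 * approximately:
 *
 *	if (is_cow_mapping(vm_flags)) {
 *		ptep_set_wrprotect(src_mm, addr, src_pte);
 *		pte = pte_wrprotect(pte);
 *	}
 *
 * is_cow_mapping() and the surrounding loop live in mm/memory.c; this
 * is a sketch of the call site, not a copy of it.
 */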

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
				  unsigned long address,
				  pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)		(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page, mapped)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif
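
/*
 * Illustrative sketch (not part of this header): a driver mapping a
 * framebuffer-style region typically relaxes the protection before
 * remapping; on architectures that leave pgprot_writecombine undefined
 * it silently degrades to pgprot_noncached via the fallback above:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	if (remap_pfn_range(vma, vma->vm_start, pfn,
 *			    vma->vm_end - vma->vm_start,
 *			    vma->vm_page_prot))
 *		return -EAGAIN;
 */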

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif
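
/*
 * Illustrative sketch (not part of this header): the p?d_addr_end()
 * macros drive the canonical nested page table walk in mm/. A
 * pmd-level loop looks roughly like this:
 *
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		// ... visit the pte range [addr, next) ...
 *	} while (pmd++, addr = next, addr != end);
 *
 * The unsigned comparison against __boundary - 1 in the macros keeps
 * this loop correct even when the rounded-up boundary wraps to 0.
 */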

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
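
/*
 * Illustrative sketch (not part of this header): the out-of-line
 * helpers in mm/memory.c report the corrupt entry and reset it, e.g.:
 *
 *	void pmd_clear_bad(pmd_t *pmd)
 *	{
 *		pmd_ERROR(*pmd);
 *		pmd_clear(pmd);
 *	}
 *
 * so a walker that sees p?d_none_or_clear_bad() return 1 can simply
 * skip the entry; the error has already been logged.
 */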

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time.  The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
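
/*
 * Illustrative sketch (not part of this header): mprotect-style code
 * uses the transaction pair so a concurrent hardware dirty/accessed
 * update cannot be lost between the read and the write. With the pte
 * lock held, mm/mprotect.c's change_pte_range() loop does roughly:
 *
 *	ptent = ptep_modify_prot_start(mm, addr, pte);
 *	ptent = pte_modify(ptent, newprot);
 *	ptep_modify_prot_commit(mm, addr, pte, ptent);
 */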
#endif /* CONFIG_MMU */

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified.  In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
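
/*
 * Illustrative sketch (not part of this header): a batched PTE update
 * loop brackets its stores with the lazy MMU hooks, under the pte lock
 * as the comment above requires:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr != end; pte++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, pte, ...);
 *	arch_leave_lazy_mmu_mode();
 *
 * A paravirt backend may coalesce the set_pte_at() calls into a single
 * hypercall at the leave; rereading *pte inside the window is the
 * "read hazard" the comment above warns about.
 */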

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 */
static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
				    unsigned long pfn, unsigned long size)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
static inline void untrack_pfn_vma(struct vm_area_struct *vma,
				   unsigned long pfn, unsigned long size)
{
}
#else
extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			     unsigned long pfn, unsigned long size);
extern int track_pfn_vma_copy(struct vm_area_struct *vma);
extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			    unsigned long size);
#endif
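
/*
 * Illustrative sketch (not part of this header): remap_pfn_range()
 * wraps the tracking hooks around the actual page table fill, roughly:
 *
 *	err = track_pfn_vma_new(vma, &prot, pfn, size);
 *	if (err)
 *		return err;
 *	err = ...;	// build the page tables for [addr, addr + size)
 *	if (err)
 *		untrack_pfn_vma(vma, pfn, size);
 *
 * On x86 the tracked attribute is the PAT memory type; architectures
 * without __HAVE_PFNMAP_TRACKING get the no-op stubs above.
 */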

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return 0;
}
#ifndef __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */