#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H
/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
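
/*
 * Illustrative sketch (assumed caller-side usage, not compiled here):
 * generic mm code picks the narrowest interface that covers what it
 * changed; the identifiers in this example are placeholders.
 *
 *	flush_tlb_page(vma, vmaddr);		// a single PTE was changed
 *	flush_tlb_range(vma, start, end);	// a run of PTEs was changed
 *	flush_tlb_mm(mm);			// the whole address space changed
 */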
#ifdef __KERNEL__


struct mm_struct;

#ifdef CONFIG_PPC64

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct ppc64_tlb_batch {
	int			active;		/* nonzero while in lazy MMU (batching) mode */
	unsigned long		index;		/* number of pending entries */
	struct mm_struct	*mm;		/* mm the pending entries belong to */
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;		/* page size of the pending entries */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()	do {} while (0)
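
/*
 * Illustrative sketch (not compiled here) of how the batching above is
 * driven.  The loop body is a placeholder for the generic mm code, which
 * brackets PTE updates with the lazy MMU hooks so that hpte_need_flush()
 * can accumulate up to PPC64_TLB_BATCH_NR entries before they are flushed
 * in one go:
 *
 *	arch_enter_lazy_mmu_mode();		// batch->active = 1
 *	for (addr = start; addr < end; addr += PAGE_SIZE) {
 *		// each PTE update ends up in hpte_need_flush(), which
 *		// records the old translation in this CPU's batch
 *		ptep_get_and_clear(mm, addr, ptep);
 *	}
 *	arch_leave_lazy_mmu_mode();		// __flush_tlb_pending() if needed
 */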


extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int local);
extern void flush_hash_range(unsigned long number, int local);

#else /* CONFIG_PPC64 */

#include <linux/mm.h>

extern void _tlbie(unsigned long address);
extern void _tlbia(void);

/*
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
#define flush_tlb_pending()	asm volatile ("tlbia; sync" : : : "memory")
#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
#define flush_tlb_pending()	_tlbia()
#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
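
/*
 * Illustrative sketch (not compiled here) of the call site in the generic
 * fault path; "ptep" and "pte" stand for the page-table slot and the value
 * that was just installed:
 *
 *	set_pte_at(vma->vm_mm, address, ptep, pte);
 *	...
 *	update_mmu_cache(vma, address, pte);	// preload hash PTE / sync caches
 */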

#endif /* CONFIG_PPC64 */

#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
	defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
#ifndef CONFIG_PPC64
	_tlbie(vmaddr);
#endif
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
#ifndef CONFIG_PPC64
	_tlbie(vmaddr);
#endif
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}

#else	/* 6xx, 7xx, 7xxx cpus */

extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#endif

/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here, there's nothing special
 * about our page-table pages.  -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */