/*
 * include/asm-ppc/tlbflush.h
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
|  | 9 | #ifdef __KERNEL__ | 
|  | 10 | #ifndef _PPC_TLBFLUSH_H | 
|  | 11 | #define _PPC_TLBFLUSH_H | 
|  | 12 |  | 
|  | 13 | #include <linux/config.h> | 
|  | 14 | #include <linux/mm.h> | 
|  | 15 |  | 
|  | 16 | extern void _tlbie(unsigned long address); | 
|  | 17 | extern void _tlbia(void); | 
|  | 18 |  | 
|  | 19 | #if defined(CONFIG_4xx) | 
|  | 20 |  | 
|  | 21 | #ifndef CONFIG_44x | 
|  | 22 | #define __tlbia()	asm volatile ("sync; tlbia; isync" : : : "memory") | 
|  | 23 | #else | 
|  | 24 | #define __tlbia		_tlbia | 
|  | 25 | #endif | 
|  | 26 |  | 
|  | 27 | static inline void flush_tlb_mm(struct mm_struct *mm) | 
|  | 28 | { __tlbia(); } | 
|  | 29 | static inline void flush_tlb_page(struct vm_area_struct *vma, | 
|  | 30 | unsigned long vmaddr) | 
|  | 31 | { _tlbie(vmaddr); } | 
|  | 32 | static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, | 
|  | 33 | unsigned long vmaddr) | 
|  | 34 | { _tlbie(vmaddr); } | 
|  | 35 | static inline void flush_tlb_range(struct vm_area_struct *vma, | 
|  | 36 | unsigned long start, unsigned long end) | 
|  | 37 | { __tlbia(); } | 
|  | 38 | static inline void flush_tlb_kernel_range(unsigned long start, | 
|  | 39 | unsigned long end) | 
|  | 40 | { __tlbia(); } | 
|  | 41 |  | 
|  | 42 | #elif defined(CONFIG_FSL_BOOKE) | 
|  | 43 |  | 
|  | 44 | /* TODO: determine if flush_tlb_range & flush_tlb_kernel_range | 
|  | 45 | * are best implemented as tlbia vs specific tlbie's */ | 
|  | 46 |  | 
|  | 47 | #define __tlbia()	_tlbia() | 
|  | 48 |  | 
|  | 49 | static inline void flush_tlb_mm(struct mm_struct *mm) | 
|  | 50 | { __tlbia(); } | 
|  | 51 | static inline void flush_tlb_page(struct vm_area_struct *vma, | 
|  | 52 | unsigned long vmaddr) | 
|  | 53 | { _tlbie(vmaddr); } | 
|  | 54 | static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, | 
|  | 55 | unsigned long vmaddr) | 
|  | 56 | { _tlbie(vmaddr); } | 
|  | 57 | static inline void flush_tlb_range(struct vm_area_struct *vma, | 
|  | 58 | unsigned long start, unsigned long end) | 
|  | 59 | { __tlbia(); } | 
|  | 60 | static inline void flush_tlb_kernel_range(unsigned long start, | 
|  | 61 | unsigned long end) | 
|  | 62 | { __tlbia(); } | 
|  | 63 |  | 
|  | 64 | #elif defined(CONFIG_8xx) | 
|  | 65 | #define __tlbia()	asm volatile ("tlbia; sync" : : : "memory") | 
|  | 66 |  | 
|  | 67 | static inline void flush_tlb_mm(struct mm_struct *mm) | 
|  | 68 | { __tlbia(); } | 
|  | 69 | static inline void flush_tlb_page(struct vm_area_struct *vma, | 
|  | 70 | unsigned long vmaddr) | 
|  | 71 | { _tlbie(vmaddr); } | 
|  | 72 | static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, | 
|  | 73 | unsigned long vmaddr) | 
|  | 74 | { _tlbie(vmaddr); } | 
|  | 75 | static inline void flush_tlb_range(struct mm_struct *mm, | 
|  | 76 | unsigned long start, unsigned long end) | 
|  | 77 | { __tlbia(); } | 
|  | 78 | static inline void flush_tlb_kernel_range(unsigned long start, | 
|  | 79 | unsigned long end) | 
|  | 80 | { __tlbia(); } | 
|  | 81 |  | 
#else	/* 6xx, 7xx, 7xxx cpus */
/*
 * These CPUs get out-of-line implementations (forward-declared here);
 * presumably the flushes need hash-table maintenance that cannot be a
 * trivial inline — TODO confirm against the arch implementation.
 */
struct mm_struct;
struct vm_area_struct;
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif
|  | 92 |  | 
/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here, there's nothing special
 * about our page-table pages.  -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* Intentionally empty: no per-arch work needed on page-table free. */
}
|  | 102 |  | 
/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
|  | 113 |  | 
|  | 114 | #endif /* _PPC_TLBFLUSH_H */ | 
|  | 115 | #endif /*__KERNEL__ */ |