#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

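/*
 * On SMP, a global flush must reach every CPU's TLB, so it is
 * dispatched through smp_flush_tlb_all(); on UP, purging the local
 * TLB is all that is needed.
 */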
#ifdef CONFIG_SMP
extern void smp_flush_tlb_all (void);
extern void smp_flush_tlb_mm (struct mm_struct *mm);
# define flush_tlb_all()	smp_flush_tlb_all()
#else
# define flush_tlb_all()	local_flush_tlb_all()
#endif

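/*
 * Complete an mm flush on this CPU: if @mm is the active mm,
 * re-activate its context so the region registers pick up the
 * (possibly freshly allocated) context number.
 */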
static inline void
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
	if (mm == current->active_mm)
		activate_context(mm);
}

/*
 * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
 * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
 * the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

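	/*
	 * Retire the old context: note it in ia64_ctx.flushmap so the
	 * context number is only reused after the next global TLB purge,
	 * and clear mm->context so a fresh context is allocated the next
	 * time this mm is activated.
	 */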
	set_bit(mm->context, ia64_ctx.flushmap);
	mm->context = 0;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	smp_flush_tlb_mm(mm);
#else
	local_finish_flush_tlb_mm(mm);
#endif
}

extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

/*
 * Page-granular tlb flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
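	/*
	 * UP: if the mm is currently active, ptc.l purges the page from
	 * the local TLB (the second operand encodes log2 of the purge
	 * size in bits 7:2, hence PAGE_SHIFT << 2).  Otherwise it is
	 * enough to invalidate the context; stale entries die when a
	 * new context is allocated.
	 */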
	if (vma->vm_mm == current->active_mm)
		ia64_ptcl(addr, (PAGE_SHIFT << 2));
	else
		vma->vm_mm->context = 0;
#endif
}

/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/*
	 * Deprecated.  The virtual page table is now flushed via the normal gather/flush
	 * interface (see tlb.h).
	 */
}

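/*
 * Flushing only [start, end) would mean stepping a purge across the
 * range; until that is implemented, just flush everything (hence the
 * "XXX fix me").
 */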
#define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */

#endif /* _ASM_IA64_TLBFLUSH_H */