#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context numbers are
 * used to reduce or eliminate the need to perform TLB flushes due to context switches.
 * Context numbers are implemented using IA-64 region ids.  Since the IA-64 TLB does not
 * consider the region number when performing a TLB lookup, we need to assign a unique
 * region id to each region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | ((addr) >> 61))
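/*
 * For example (purely illustrative): context number 5 and an address in
 * region 3 (i.e., addr >> 61 == 3) yield region id (5 << 3) | 3 == 43, so
 * each of the eight virtual regions of an address space gets a distinct rid.
 */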

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* next >= limit => must call wrap_mmu_context() */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
};

extern struct ia64_ctx ia64_ctx;
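/* non-zero when this CPU must still flush its TLB after a context-counter wrap (see below): */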
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void wrap_mmu_context (struct mm_struct *mm);

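/*
 * Nothing to do on IA-64; this hook exists only because the generic mm code
 * expects every architecture to provide it.
 */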
static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around, all TLBs need to be flushed because an old
 * context number might have been reused. This is signalled by the ia64_need_tlb_flush
 * per-CPU variable, which is checked in the routine below. Called by activate_mm().
 * <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		{
			/* re-check with the lock held, since the flag is set during a context wrap: */
			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
				local_flush_tlb_all();
				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
			}
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (unlikely(!context)) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		{
			/* re-check, now that we've got the lock: */
			context = mm->context;
			if (context == 0) {
				cpus_clear(mm->cpu_vm_mask);
				if (ia64_ctx.next >= ia64_ctx.limit)
					wrap_mmu_context(mm);
				mm->context = context = ia64_ctx.next++;
			}
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a brand-new
 * address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do.  */
}

static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;	/* the rid field starts at bit 8 of a region register */

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
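	/*
	 * Illustrative example (assuming 16KB pages, i.e. PAGE_SHIFT == 14):
	 * for context 1, rid == 1 << 3 == 8 and rr0 == (8 << 8) | (14 << 2) | 1
	 * == 0x839; rr1..rr4 differ only in the low three bits of the rid field.
	 */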
#ifdef  CONFIG_HUGETLB_PAGE
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);	/* preserve rr4's huge-page size (ps) bits */

#  if RGN_HPAGE != 4
#    error "reload_context assumes RGN_HPAGE is 4"
#  endif
#endif

	ia64_set_rr(0x0000000000000000UL, rr0);
	ia64_set_rr(0x2000000000000000UL, rr1);
	ia64_set_rr(0x4000000000000000UL, rr2);
	ia64_set_rr(0x6000000000000000UL, rr3);
	ia64_set_rr(0x8000000000000000UL, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

/*
 * Must be called with preemption off: we read smp_processor_id() and load
 * this CPU's region registers, so migrating mid-way would be unsafe.
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
		reload_context(context);
		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt handlers cannot
	 * touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */