#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using IA-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a region
 * id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | ((addr) >> 61))

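/*
 * Worked example (illustrative values): an unsigned user address in
 * region 4, e.g. 0x8000000000000000UL, has (addr >> 61) == 4, so for
 * context number 5 the macro yields (5 << 3) | 4 == 44; each of the
 * eight 2^61-byte regions of a process thus gets its own region id.
 */
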
# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* end of current free range; rescan bitmap when next >= limit */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
				/* call wrap_mmu_context when next >= max */
	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
	unsigned long *flushmap;/* pending rid to be flushed */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
	/* nothing to do: IA-64 requires no extra work for lazy TLB mode */
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused.  This is signalled by the
 * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
 * below, called from get_mmu_context().
 * <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		{
			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
				local_flush_tlb_all();
				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
			}
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

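/*
 * Wrap handshake sketch (based on the comment above; wrap_mmu_context()
 * itself lives in arch/ia64/mm/tlb.c): the CPU that wraps the counter
 * marks every CPU's ia64_need_tlb_flush, and each CPU then flushes its
 * own TLB lazily, here, before handing out a recycled context number.
 */
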
static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (unlikely(!context)) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		{
			/* re-check, now that we've got the lock: */
			context = mm->context;
			if (context == 0) {
				cpus_clear(mm->cpu_vm_mask);
				if (ia64_ctx.next >= ia64_ctx.limit) {
					/* current free range is used up; find the next one */
					ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
							ia64_ctx.max_ctx, ia64_ctx.next);
					ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
							ia64_ctx.max_ctx, ia64_ctx.next);
					if (ia64_ctx.next >= ia64_ctx.max_ctx)
						wrap_mmu_context(mm);
				}
				mm->context = context = ia64_ctx.next++;
				__set_bit(context, ia64_ctx.bitmap);
			}
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}

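/*
 * Allocator walk-through (hypothetical bitmap state): suppose bits 1..4
 * of ia64_ctx.bitmap are set, bits 5..9 are clear, and bit 10 is set.
 * When next reaches limit, find_next_zero_bit() sets next = 5 and
 * find_next_bit() sets limit = 10, so contexts 5..9 are then handed out
 * with nothing more than the increment above, no rescan needed.
 */
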
/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do. */
}

static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
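	/*
	 * Encoding example (illustrative values): with 16KB pages
	 * (PAGE_SHIFT == 14) and rid == 8, rr0 == (8 << 8) | (14 << 2) | 1
	 * == 0x839: the region id sits in bits 8 and up, the preferred
	 * page-size shift in bits 2-7, and the VHPT enable in bit 0.
	 * Adding rid_incr == (1 << 8) per region bumps only the rid field.
	 */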
#ifdef CONFIG_HUGETLB_PAGE
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

# if RGN_HPAGE != 4
#  error "reload_context assumes RGN_HPAGE is 4"
# endif
#endif

	ia64_set_rr(0x0000000000000000UL, rr0);
	ia64_set_rr(0x2000000000000000UL, rr1);
	ia64_set_rr(0x4000000000000000UL, rr2);
	ia64_set_rr(0x6000000000000000UL, rr3);
	ia64_set_rr(0x8000000000000000UL, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
		reload_context(context);
		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */