#ifndef __I386_SCHED_H
#define __I386_SCHED_H

#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

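/*
 * enter_lazy_tlb() is called when the kernel borrows an mm (e.g. for a
 * kernel thread): the CPU keeps the old page tables loaded but marks its
 * TLB state as lazy, so a later flush IPI makes it leave the mm instead
 * of keeping its user-space TLB entries up to date.
 */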
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	unsigned cpu = smp_processor_id();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}

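/*
 * switch_mm() switches this CPU to a new address space: it updates the
 * cpu_vm_mask bookkeeping that targets TLB flush IPIs, reloads %cr3, and
 * reloads the LDT when the two mms use different descriptor tables.
 */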
static inline void switch_mm(struct mm_struct *prev,
			     struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		/* this CPU now actively runs next with a valid TLB */
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
		cpu_set(cpu, next->cpu_vm_mask);

		/* Re-load page tables */
		load_cr3(next->pgd);

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context, cpu);
	}
#ifdef CONFIG_SMP
	else {
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload %cr3.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context, cpu);
		}
	}
#endif
}

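/*
 * deactivate_mm() is called before an mm is dropped: it zeroes %fs and
 * %gs so no segment register is left pointing at an LDT entry that may
 * be freed along with the mm.
 */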
#define deactivate_mm(tsk, mm) \
	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))

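/*
 * activate_mm() installs a brand-new mm (e.g. at exec time); there is no
 * previous task context to hand over, so it is just switch_mm() with a
 * NULL task.
 */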
#define activate_mm(prev, next) \
	switch_mm((prev),(next),NULL)

#endif