/*
 * OpenRISC tlb.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Julius Baxter <julius.baxter@orsoc.se>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/init.h>

#include <asm/segment.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/spr_defs.h>

#define NO_CONTEXT -1

#define NUM_DTLB_SETS (1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> \
			    SPR_DMMUCFGR_NTS_OFF))
#define NUM_ITLB_SETS (1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> \
			    SPR_IMMUCFGR_NTS_OFF))
#define DTLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_DTLB_SETS-1))
#define ITLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_ITLB_SETS-1))
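
/*
 * The NTS field of xMMUCFGR holds log2 of the number of TLB sets, so
 * shifting 1 by it yields the set count.  The xTLB_OFFSET macros pick
 * the set an address maps to: the virtual page number masked by the
 * (power-of-two) set count minus one.
 */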
/*
 * Invalidate all TLB entries.
 *
 * This comes down to setting the 'valid' bit for all xTLBMR registers to 0.
 * The easiest way to accomplish this is to just zero out the xTLBMR
 * registers completely.
 */

void flush_tlb_all(void)
{
	int i;
	unsigned long num_tlb_sets;

	/* Determine number of sets for IMMU. */
	/* FIXME: Assumption is I & D nsets equal. */
	num_tlb_sets = NUM_ITLB_SETS;

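	/*
	 * Zeroing a match register clears its valid bit, invalidating
	 * the entry; do it for way 0 of every DTLB and ITLB set.
	 */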
	for (i = 0; i < num_tlb_sets; i++) {
		mtspr_off(SPR_DTLBMR_BASE(0), i, 0);
		mtspr_off(SPR_ITLBMR_BASE(0), i, 0);
	}
}

#define have_dtlbeir (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_TEIRI)
#define have_itlbeir (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_TEIRI)
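
/*
 * xMMUCFGR.TEIRI is set when the implementation provides the
 * corresponding xTLBEIR invalidate-entry register, which is what the
 * two tests above check for.
 */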

/*
 * Invalidate a single page.  This is what the xTLBEIR register is for.
 *
 * There's no point in checking the vma for PAGE_EXEC to determine whether
 * it's the data or instruction TLB that should be flushed... that would
 * take more than the few instructions that the following compiles down to!
 *
 * The case where we don't have the xTLBEIR register really only works for
 * MMUs with a single way and is hard-coded that way.
 */

#define flush_dtlb_page_eir(addr) mtspr(SPR_DTLBEIR, addr)
#define flush_dtlb_page_no_eir(addr) \
	mtspr_off(SPR_DTLBMR_BASE(0), DTLB_OFFSET(addr), 0)

#define flush_itlb_page_eir(addr) mtspr(SPR_ITLBEIR, addr)
#define flush_itlb_page_no_eir(addr) \
	mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0)
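
/*
 * Note that the no_eir variants zero the match register of whichever
 * entry currently occupies the set the address maps to.  This can
 * invalidate an unrelated translation sharing that set, which is
 * harmless: the entry is simply refilled on the next access.
 */
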
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (have_dtlbeir)
		flush_dtlb_page_eir(addr);
	else
		flush_dtlb_page_no_eir(addr);

	if (have_itlbeir)
		flush_itlb_page_eir(addr);
	else
		flush_itlb_page_no_eir(addr);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	unsigned long addr;
	bool dtlbeir;
	bool itlbeir;

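	/*
	 * Query the MMU configuration registers once, outside the loop,
	 * rather than re-reading them for every page in the range.
	 */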
	dtlbeir = have_dtlbeir;
	itlbeir = have_itlbeir;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		if (dtlbeir)
			flush_dtlb_page_eir(addr);
		else
			flush_dtlb_page_no_eir(addr);

		if (itlbeir)
			flush_itlb_page_eir(addr);
		else
			flush_itlb_page_no_eir(addr);
	}
}

/*
 * Invalidate the selected mm context only.
 *
 * FIXME: Due to some bug here, we're flushing everything for now.
 * This should be changed to loop over the mm and call flush_tlb_range.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	/* Was seeing bugs with the mm struct passed to us.  Scrapped
	 * most of this function.
	 */
	/* Several architectures do this */
	flush_tlb_all();
}

/* called in schedule() just before actually doing the switch_to */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *next_tsk)
{
	/* remember the pgd for the fault handlers
	 * this is similar to the pgd register in some other CPUs.
	 * we need our own copy of it because current and active_mm
	 * might be invalid at points where we still need to
	 * dereference the pgd.
	 */
	current_pgd = next->pgd;

	/* We don't have context support implemented, so flush all
	 * entries belonging to the previous map
	 */
	if (prev != next)
		flush_tlb_mm(prev);
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

/* called by __exit_mm to destroy the used MMU context if any before
 * destroying the mm itself.  this is only called when the last user
 * of the mm drops it.
 */

void destroy_context(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

/* called once during VM initialization, from init.c */

void __init tlb_init(void)
{
	/* Do nothing... */
	/* invalidate the entire TLB */
	/* flush_tlb_all(); */
}