/*
 * File:         include/asm-blackfin/mmu_context.h
 * Based on:
 * Author:
 *
 * Created:
 * Description:
 *
 * Modified:
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__

#include <linux/gfp.h>
#include <linux/sched.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>

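/*
 * Book-keeping for tasks whose stack lives in on-chip L1 scratchpad SRAM.
 * A single L1 region is shared by all such tasks: nr_l1stack_tasks counts
 * its users, and current_l1_stack_save points at the save area of the mm
 * whose stack currently occupies the region.
 */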
extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;

extern int l1sram_free(const void*);
extern void *l1sram_alloc_max(void*);

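/* Nothing to do when entering lazy TLB mode on Blackfin.  */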
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* Called when creating a new context during fork() or execve().  */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
	mm->context.page_rwx_mask = (unsigned long *)p;
	memset(mm->context.page_rwx_mask, 0,
	       page_mask_nelts * 3 * sizeof(long));
#endif
	return 0;
}

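/*
 * Drop one reference on the shared L1 stack region and hand it back to
 * the L1 scratchpad allocator once the last user is gone.
 */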
static inline void free_l1stack(void)
{
	nr_l1stack_tasks--;
	if (nr_l1stack_tasks == 0)
		l1sram_free(l1_stack_base);
}
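
/*
 * Tear down the architecture-specific parts of an mm: give back the L1
 * stack if this mm owned one, free any per-process SRAM allocations
 * still on its sram_list, and release the MPU permission bitmaps.
 */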
static inline void destroy_context(struct mm_struct *mm)
{
	struct sram_list_struct *tmp;

	if (current_l1_stack_save == mm->context.l1_stack_save)
		current_l1_stack_save = NULL;
	if (mm->context.l1_stack_save)
		free_l1stack();

	while ((tmp = mm->context.sram_list)) {
		mm->context.sram_list = tmp->next;
		sram_free(tmp->addr);
		kfree(tmp);
	}
#ifdef CONFIG_MPU
	if (current_rwx_mask == mm->context.page_rwx_mask)
		current_rwx_mask = NULL;
	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}

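/*
 * Reserve room for a task stack in L1 scratchpad SRAM.  The first user
 * grabs the whole free scratchpad; later users share that same region.
 * Returns the usable length, or 0 if the region cannot be allocated or
 * is smaller than the requested length.
 */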
static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
	if (nr_l1stack_tasks == 0) {
		l1_stack_base = l1sram_alloc_max(&l1_stack_len);
		if (!l1_stack_base)
			return 0;
	}

	if (l1_stack_len < length) {
		if (nr_l1stack_tasks == 0)
			l1sram_free(l1_stack_base);
		return 0;
	}
	*stack_base = (unsigned long)l1_stack_base;
	nr_l1stack_tasks++;
	return l1_stack_len;
}

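/*
 * Make this mm the current owner of the L1 stack region: save the
 * previous owner's stack contents back to its save area, then copy this
 * mm's saved stack image (at sp_base) into L1.
 */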
static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
	return 1;
}

#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next) switch_mm(prev, next, NULL)

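/*
 * Switch address-space state on a context switch: with CONFIG_MPU, if the
 * outgoing mm's permission masks are the ones currently installed, flush
 * the switched CPLBs and install the incoming mm's masks; then, if the
 * incoming task keeps its stack in L1 scratchpad, swap the shared L1
 * stack contents over to its saved image.
 */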
static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
			     struct task_struct *tsk)
{
	if (prev_mm == next_mm)
		return;
#ifdef CONFIG_MPU
	if (prev_mm->context.page_rwx_mask == current_rwx_mask) {
		flush_switched_cplbs();
		set_mask_dcplbs(next_mm->context.page_rwx_mask);
	}
#endif

	/* L1 stack switching.  */
	if (!next_mm->context.l1_stack_save)
		return;
	if (next_mm->context.l1_stack_save == current_l1_stack_save)
		return;
	if (current_l1_stack_save) {
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	}
	current_l1_stack_save = next_mm->context.l1_stack_save;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
}

#ifdef CONFIG_MPU
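/*
 * Record the protection bits for one 4KB page in the mm's MPU bitmaps.
 * Three bitmaps (read, write, execute) of page_mask_nelts words each are
 * laid out back to back, one bit per page, indexed by addr >> 12.
 */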
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
				unsigned long flags)
{
	unsigned long *mask = mm->context.page_rwx_mask;
	unsigned long page = addr >> 12;
	unsigned long idx = page >> 5;
	unsigned long bit = 1 << (page & 31);

	if (flags & VM_MAYREAD)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_MAYWRITE)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_MAYEXEC)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
}

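/*
 * Flush the CPLB entries loaded under the old permissions and reprogram
 * the data CPLB masks from the mm's (possibly updated) bitmaps.
 */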
static inline void update_protections(struct mm_struct *mm)
{
	flush_switched_cplbs();
	set_mask_dcplbs(mm->context.page_rwx_mask);
}
#endif

#endif