/*
 *  linux/kernel/vm86.c
 *
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */


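/*
 * The kernel_vm86_regs handed to the fault/trap handlers below sit at the
 * start of the kernel_vm86_struct that do_sys_vm86() builds on the kernel
 * stack, so the cast in KVM86 recovers the enclosing structure from the
 * regs pointer.
 */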
#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.eax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.eax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.eip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.esp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

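/*
 * SAFE_MASK selects the EFLAGS bits vm86 code may modify directly: the
 * status flags (CF, PF, AF, ZF, SF, OF) plus TF and DF.  IF is never
 * passed through; it is virtualized via VIF in VEFLAGS below.
 * RETURN_MASK additionally keeps the reserved low bits when get_vflags()
 * builds the flags image seen by the vm86 program.
 */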
#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)

/* convert kernel_vm86_regs to vm86_regs */
static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
				  const struct kernel_vm86_regs *regs)
{
	int ret = 0;

	/* kernel_vm86_regs is missing xfs, so copy everything up to
	   (but not including) xgs, and then the rest after xgs. */
	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.xgs));
	ret += copy_to_user(&user->__null_gs, &regs->pt.xgs,
			    sizeof(struct kernel_vm86_regs) -
			    offsetof(struct kernel_vm86_regs, pt.xgs));

	return ret;
}

/* convert vm86_regs to kernel_vm86_regs */
static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
				    const struct vm86_regs __user *user,
				    unsigned extra)
{
	int ret = 0;

	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.xgs));
	ret += copy_from_user(&regs->pt.xgs, &user->__null_gs,
			      sizeof(struct kernel_vm86_regs) -
			      offsetof(struct kernel_vm86_regs, pt.xgs) +
			      extra);

	return ret;
}

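/*
 * Leave vm86 mode: copy the vm86 register image and screen bitmap back to
 * the user's vm86_struct, restore the saved esp0/sysenter_cs and the fs/gs
 * segments, and hand back the 32-bit pt_regs that vm86() was entered with.
 */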
struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	unsigned long tmp;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(init_tss, get_cpu());
	current->thread.esp0 = current->thread.saved_esp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_esp0(tss, &current->thread);
	current->thread.saved_esp0 = 0;
	put_cpu();

	ret = KVM86->regs32;

	loadsegment(fs, current->thread.saved_fs);
	ret->xgs = current->thread.saved_gs;

	return ret;
}

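/*
 * Write-protect the 32 PTEs covering the legacy video memory window at
 * 0xA0000-0xBFFFF, so that with VM86_SCREEN_BITMAP set screen writes
 * fault and can be tracked through the screen_bitmap.
 */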
static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	flush_tlb();
}



static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

asmlinkage int sys_vm86old(struct pt_regs regs)
{
	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_esp0)
		goto out;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, vm86plus) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = &regs;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}


asmlinkage int sys_vm86(struct pt_regs regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;

	tsk = current;
	switch (regs.ebx) {
		case VM86_REQUEST_IRQ:
		case VM86_FREE_IRQ:
		case VM86_GET_IRQ_BITS:
		case VM86_GET_AND_RESET_IRQ:
			ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
			goto out;
		case VM86_PLUS_INSTALL_CHECK:
			/* NOTE: on old vm86 stuff this will return the error
			   from access_ok(), because the subfunction is
			   interpreted as an (invalid) address of a vm86_struct.
			   So the installation check works.
			 */
			ret = 0;
			goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_esp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)regs.ecx;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, regs32) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = &regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}


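/*
 * Enter v86 mode: sanitize the register image, save the 32-bit state in
 * the thread struct, point esp0 at the kernel_vm86_struct on this stack
 * and jump to resume_userspace.  We only come back via save_v86_state().
 */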
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
	struct tss_struct *tss;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.pt.xds = 0;
	info->regs.pt.xes = 0;
	info->regs.pt.xgs = 0;

/* we are clearing fs later just before "jmp resume_userspace",
 * because it is not saved/restored.
 */

/*
 * The eflags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = info->regs.pt.eflags;
	info->regs.pt.eflags &= SAFE_MASK;
	info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK;
	info->regs.pt.eflags |= VM_MASK;

	switch (info->cpu_type) {
		case CPU_286:
			tsk->thread.v86mask = 0;
			break;
		case CPU_386:
			tsk->thread.v86mask = NT_MASK | IOPL_MASK;
			break;
		case CPU_486:
			tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
			break;
		default:
			tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
			break;
	}

/*
 * Save old state, set default return value (%eax) to 0
 */
	info->regs32->eax = 0;
	tsk->thread.saved_esp0 = tsk->thread.esp0;
	savesegment(fs, tsk->thread.saved_fs);
	tsk->thread.saved_gs = info->regs32->xgs;

	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_esp0(tss, &tsk->thread);
	put_cpu();

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	/* call audit_syscall_exit since we do not exit via the normal paths */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(0), 0);

	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"mov  %2, %%fs\n\t"
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
	/* we never return here */
}

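/*
 * Bail out of v86 mode immediately: save the vm86 state, stuff the return
 * value into the 32-bit eax and jump straight to resume_userspace, so the
 * original vm86() call appears to return with 'retval'.
 */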
static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
{
	struct pt_regs * regs32;

	regs32 = save_v86_state(regs16);
	regs32->eax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}

static inline void set_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS |= VIF_MASK;
	if (VEFLAGS & VIP_MASK)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct kernel_vm86_regs * regs)
{
	regs->pt.eflags &= ~TF_MASK;
}

static inline void clear_AC(struct kernel_vm86_regs * regs)
{
	regs->pt.eflags &= ~AC_MASK;
}

/* It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
{
	set_flags(VEFLAGS, eflags, current->thread.v86mask);
	set_flags(regs->pt.eflags, eflags, SAFE_MASK);
	if (eflags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.eflags, flags, SAFE_MASK);
	if (flags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
{
	unsigned long flags = regs->pt.eflags & RETURN_MASK;

	if (VEFLAGS & VIF_MASK)
		flags |= IF_MASK;
	flags |= IOPL_MASK;
	return flags | (VEFLAGS & current->thread.v86mask);
}

static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap),"r" (nr));
	return nr;
}

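/*
 * Helpers for accessing the vm86 stack and code through user space, one
 * byte at a time via put_user()/get_user(); any fault branches to the
 * caller-supplied error label instead of returning to userspace.
 */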
#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

/* There are so many possible reasons for this function to return
 * VM86_INTx that adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
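/*
 * Emulate a real-mode software interrupt: look up the vector in the IVT
 * at linear address i*4, push FLAGS/CS/IP on the vm86 stack and jump to
 * the handler.  Interrupts that are revectored, or whose handler lives in
 * (or is invoked from) BIOSSEG, are bounced back to 32-bit mode as
 * VM86_INTx so the monitor can deal with them.
 */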
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user * ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->pt.xcs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i==0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.xcs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.xcs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}

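/*
 * Called for traps raised while in v86 mode.  With vm86plus, debug and
 * breakpoint traps go back to the 32-bit monitor and everything else is
 * reflected into the vm86 program via do_int(); otherwise only trap 1 is
 * handled here (delivered as SIGTRAP) and other traps are left to the
 * caller.
 */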
int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ( (trapno==3) || (trapno==1) )
			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	if (current->ptrace & PT_PTRACED) {
		unsigned long flags;
		spin_lock_irqsave(&current->sighand->siglock, flags);
		sigdelset(&current->blocked, SIGTRAP);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}
	send_sig(SIGTRAP, current, 1);
	current->thread.trap_no = trapno;
	current->thread.error_code = error_code;
	return 0;
}

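/*
 * General protection fault while in v86 mode: decode the faulting
 * instruction (skipping operand/segment prefixes) and emulate the handful
 * of instructions we care about (pushf/popf, int $xx, iret, cli, sti).
 * Anything else is passed back to the 32-bit monitor as VM86_UNKNOWN.
 */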
void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= TF_MASK
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & TF_MASK) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)

	orig_flags = *(unsigned short *)&regs->pt.eflags;

	csp = (unsigned char __user *) (regs->pt.xcs << 4);
	ssp = (unsigned char __user *) (regs->pt.xss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
			case 0x66:      /* 32-bit data */     data32 = 1; break;
			case 0x67:      /* 32-bit address */  break;
			case 0x2e:      /* CS */              break;
			case 0x3e:      /* DS */              break;
			case 0x26:      /* ES */              break;
			case 0x36:      /* SS */              break;
			case 0x65:      /* GS */              break;
			case 0x64:      /* FS */              break;
			case 0xf2:      /* repnz */           break;
			case 0xf3:      /* rep */             break;
			default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
		}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ( (1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.xcs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
		}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME		"vm86irq"

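/*
 * Each forwardable IRQ can be claimed by one task, which may optionally
 * receive a signal when the IRQ fires; the handler records pending IRQs
 * in 'irqbits' and keeps the line masked until the owner collects it
 * via VM86_GET_AND_RESET_IRQ.
 */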
static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED) )

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || ! vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when the user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}