uml: fold mmu_context_skas into mm_context

This patch folds mmu_context_skas into struct mm_context, changing all users
of these structures as needed.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 25c5026..7c77ade 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -23,14 +23,14 @@
 
 	arch_flush_thread(&current->thread.arch);
 
-	ret = unmap(&current->mm->context.skas.id, 0, end, 1, &data);
+	ret = unmap(&current->mm->context.id, 0, end, 1, &data);
 	if (ret) {
 		printk(KERN_ERR "flush_thread - clearing address space failed, "
 		       "err = %d\n", ret);
 		force_sig(SIGKILL, current);
 	}
 
-	__switch_mm(&current->mm->context.skas.id);
+	__switch_mm(&current->mm->context.id);
 }
 
 void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 9d8eea4..04cebcf 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -25,7 +25,7 @@
 			if(p->mm == NULL)
 				continue;
 
-			pid = p->mm->context.skas.id.u.pid;
+			pid = p->mm->context.id.u.pid;
 			os_kill_ptraced_process(pid, 1);
 		}
 	}
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index c5475ec..48c8c13 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -47,9 +47,9 @@
 	 * destroy_context_skas.
 	 */
 
-	mm->context.skas.last_page_table = pmd_page_vaddr(*pmd);
+	mm->context.last_page_table = pmd_page_vaddr(*pmd);
 #ifdef CONFIG_3_LEVEL_PGTABLES
-	mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
+	mm->context.last_pmd = (unsigned long) __va(pud_val(*pud));
 #endif
 
 	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
@@ -66,8 +66,8 @@
 
 int init_new_context(struct task_struct *task, struct mm_struct *mm)
 {
-	struct mmu_context_skas *from_mm = NULL;
-	struct mmu_context_skas *to_mm = &mm->context.skas;
+	struct mm_context *from_mm = NULL;
+	struct mm_context *to_mm = &mm->context;
 	unsigned long stack = 0;
 	int ret = -ENOMEM;
 
@@ -97,7 +97,7 @@
 
 	to_mm->id.stack = stack;
 	if (current->mm != NULL && current->mm != &init_mm)
-		from_mm = &current->mm->context.skas;
+		from_mm = &current->mm->context;
 
 	if (proc_mm) {
 		ret = new_mm(stack);
@@ -133,7 +133,7 @@
 
 void destroy_context(struct mm_struct *mm)
 {
-	struct mmu_context_skas *mmu = &mm->context.skas;
+	struct mm_context *mmu = &mm->context;
 
 	if (proc_mm)
 		os_close_file(mmu->id.u.mm_fd);
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 9ce1c49..0297e63 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -65,5 +65,5 @@
 	if (current->mm == NULL)
 		return 0;
 
-	return current->mm->context.skas.id.stack;
+	return current->mm->context.id.stack;
 }
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 849922f..081baef 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -14,8 +14,8 @@
 
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 		    unsigned int prot, struct host_vm_op *ops, int *index,
-		    int last_filled, union mm_context *mmu, void **flush,
-		    int (*do_ops)(union mm_context *, struct host_vm_op *,
+		    int last_filled, struct mm_context *mmu, void **flush,
+		    int (*do_ops)(struct mm_context *, struct host_vm_op *,
 				  int, int, void **))
 {
 	__u64 offset;
@@ -52,8 +52,8 @@
 
 static int add_munmap(unsigned long addr, unsigned long len,
 		      struct host_vm_op *ops, int *index, int last_filled,
-		      union mm_context *mmu, void **flush,
-		      int (*do_ops)(union mm_context *, struct host_vm_op *,
+		      struct mm_context *mmu, void **flush,
+		      int (*do_ops)(struct mm_context *, struct host_vm_op *,
 				    int, int, void **))
 {
 	struct host_vm_op *last;
@@ -82,8 +82,8 @@
 
 static int add_mprotect(unsigned long addr, unsigned long len,
 			unsigned int prot, struct host_vm_op *ops, int *index,
-			int last_filled, union mm_context *mmu, void **flush,
-			int (*do_ops)(union mm_context *, struct host_vm_op *,
+			int last_filled, struct mm_context *mmu, void **flush,
+			int (*do_ops)(struct mm_context *, struct host_vm_op *,
 				      int, int, void **))
 {
 	struct host_vm_op *last;
@@ -117,8 +117,8 @@
 static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 				   unsigned long end, struct host_vm_op *ops,
 				   int last_op, int *op_index, int force,
-				   union mm_context *mmu, void **flush,
-				   int (*do_ops)(union mm_context *,
+				   struct mm_context *mmu, void **flush,
+				   int (*do_ops)(struct mm_context *,
 						 struct host_vm_op *, int, int,
 						 void **))
 {
@@ -157,8 +157,8 @@
 static inline int update_pmd_range(pud_t *pud, unsigned long addr,
 				   unsigned long end, struct host_vm_op *ops,
 				   int last_op, int *op_index, int force,
-				   union mm_context *mmu, void **flush,
-				   int (*do_ops)(union mm_context *,
+				   struct mm_context *mmu, void **flush,
+				   int (*do_ops)(struct mm_context *,
 						 struct host_vm_op *, int, int,
 						 void **))
 {
@@ -187,8 +187,8 @@
 static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
 				   unsigned long end, struct host_vm_op *ops,
 				   int last_op, int *op_index, int force,
-				   union mm_context *mmu, void **flush,
-				   int (*do_ops)(union mm_context *,
+				   struct mm_context *mmu, void **flush,
+				   int (*do_ops)(struct mm_context *,
 						 struct host_vm_op *, int, int,
 						 void **))
 {
@@ -216,11 +216,11 @@
 
 void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 		      unsigned long end_addr, int force,
-		      int (*do_ops)(union mm_context *, struct host_vm_op *,
+		      int (*do_ops)(struct mm_context *, struct host_vm_op *,
 				    int, int, void **))
 {
 	pgd_t *pgd;
-	union mm_context *mmu = &mm->context;
+	struct mm_context *mmu = &mm->context;
 	struct host_vm_op ops[1];
 	unsigned long addr = start_addr, next;
 	int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
@@ -375,7 +375,7 @@
 		w = 0;
 	}
 
-	mm_id = &mm->context.skas.id;
+	mm_id = &mm->context.id;
 	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
 		(x ? UM_PROT_EXEC : 0));
 	if (pte_newpage(*pte)) {
@@ -453,7 +453,7 @@
 	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
 }
 
-static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
+static int do_ops(struct mm_context *mmu, struct host_vm_op *ops, int last,
 		  int finished, void **flush)
 {
 	struct host_vm_op *op;
@@ -463,17 +463,16 @@
 	op = &ops[i];
 		switch(op->type) {
 		case MMAP:
-			ret = map(&mmu->skas.id, op->u.mmap.addr,
-				  op->u.mmap.len, op->u.mmap.prot,
-				  op->u.mmap.fd, op->u.mmap.offset, finished,
-				  flush);
+			ret = map(&mmu->id, op->u.mmap.addr, op->u.mmap.len,
+				  op->u.mmap.prot, op->u.mmap.fd,
+				  op->u.mmap.offset, finished, flush);
 			break;
 		case MUNMAP:
-			ret = unmap(&mmu->skas.id, op->u.munmap.addr,
+			ret = unmap(&mmu->id, op->u.munmap.addr,
 				    op->u.munmap.len, finished, flush);
 			break;
 		case MPROTECT:
-			ret = protect(&mmu->skas.id, op->u.mprotect.addr,
+			ret = protect(&mmu->id, op->u.mprotect.addr,
 				      op->u.mprotect.len, op->u.mprotect.prot,
 				      finished, flush);
 			break;