uml: style fixes pass 3

Formatting changes in the files which have been changed in the course
of folding foo_skas functions into their callers.  These include:
	copyright updates
	header file trimming
	style fixes
	adding severity to printks

These changes should be entirely non-functional.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 12b8c63..849922f 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -1,19 +1,16 @@
 /*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  * Licensed under the GPL
  */
 
 #include "linux/mm.h"
-#include "asm/page.h"
-#include "asm/pgalloc.h"
 #include "asm/pgtable.h"
 #include "asm/tlbflush.h"
 #include "as-layout.h"
-#include "tlb.h"
-#include "mem.h"
 #include "mem_user.h"
 #include "os.h"
 #include "skas.h"
+#include "tlb.h"
 
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 		    unsigned int prot, struct host_vm_op *ops, int *index,
@@ -26,18 +23,18 @@
 	int fd, ret = 0;
 
 	fd = phys_mapping(phys, &offset);
-	if(*index != -1){
+	if (*index != -1) {
 		last = &ops[*index];
-		if((last->type == MMAP) &&
+		if ((last->type == MMAP) &&
 		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
 		   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
-		   (last->u.mmap.offset + last->u.mmap.len == offset)){
+		   (last->u.mmap.offset + last->u.mmap.len == offset)) {
 			last->u.mmap.len += len;
 			return 0;
 		}
 	}
 
-	if(*index == last_filled){
+	if (*index == last_filled) {
 		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
 		*index = -1;
 	}
@@ -62,16 +59,16 @@
 	struct host_vm_op *last;
 	int ret = 0;
 
-	if(*index != -1){
+	if (*index != -1) {
 		last = &ops[*index];
-		if((last->type == MUNMAP) &&
-		   (last->u.munmap.addr + last->u.mmap.len == addr)){
+		if ((last->type == MUNMAP) &&
+		   (last->u.munmap.addr + last->u.mmap.len == addr)) {
 			last->u.munmap.len += len;
 			return 0;
 		}
 	}
 
-	if(*index == last_filled){
+	if (*index == last_filled) {
 		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
 		*index = -1;
 	}
@@ -92,17 +89,17 @@
 	struct host_vm_op *last;
 	int ret = 0;
 
-	if(*index != -1){
+	if (*index != -1) {
 		last = &ops[*index];
-		if((last->type == MPROTECT) &&
+		if ((last->type == MPROTECT) &&
 		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
-		   (last->u.mprotect.prot == prot)){
+		   (last->u.mprotect.prot == prot)) {
 			last->u.mprotect.len += len;
 			return 0;
 		}
 	}
 
-	if(*index == last_filled){
+	if (*index == last_filled) {
 		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
 		*index = -1;
 	}
@@ -141,15 +138,15 @@
 		}
 		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
 			(x ? UM_PROT_EXEC : 0));
-		if(force || pte_newpage(*pte)){
-			if(pte_present(*pte))
+		if (force || pte_newpage(*pte)) {
+			if (pte_present(*pte))
 				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
 					       PAGE_SIZE, prot, ops, op_index,
 					       last_op, mmu, flush, do_ops);
 			else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
 					      last_op, mmu, flush, do_ops);
 		}
-		else if(pte_newprot(*pte))
+		else if (pte_newprot(*pte))
 			ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
 					   last_op, mmu, flush, do_ops);
 		*pte = pte_mkuptodate(*pte);
@@ -172,8 +169,8 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		if(!pmd_present(*pmd)){
-			if(force || pmd_newpage(*pmd)){
+		if (!pmd_present(*pmd)) {
+			if (force || pmd_newpage(*pmd)) {
 				ret = add_munmap(addr, next - addr, ops,
 						 op_index, last_op, mmu,
 						 flush, do_ops);
@@ -202,8 +199,8 @@
 	pud = pud_offset(pgd, addr);
 	do {
 		next = pud_addr_end(addr, end);
-		if(!pud_present(*pud)){
-			if(force || pud_newpage(*pud)){
+		if (!pud_present(*pud)) {
+			if (force || pud_newpage(*pud)) {
 				ret = add_munmap(addr, next - addr, ops,
 						 op_index, last_op, mmu,
 						 flush, do_ops);
@@ -233,8 +230,8 @@
 	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end_addr);
-		if(!pgd_present(*pgd)){
-			if (force || pgd_newpage(*pgd)){
+		if (!pgd_present(*pgd)) {
+			if (force || pgd_newpage(*pgd)) {
 				ret = add_munmap(addr, next - addr, ops,
 						 &op_index, last_op, mmu,
 						 &flush, do_ops);
@@ -246,12 +243,13 @@
 					    do_ops);
 	} while (pgd++, addr = next, ((addr != end_addr) && !ret));
 
-	if(!ret)
+	if (!ret)
 		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);
 
 	/* This is not an else because ret is modified above */
-	if(ret) {
-		printk("fix_range_common: failed, killing current process\n");
+	if (ret) {
+		printk(KERN_ERR "fix_range_common: failed, killing current "
+		       "process\n");
 		force_sig(SIGKILL, current);
 	}
 }
@@ -267,17 +265,17 @@
 	int updated = 0, err;
 
 	mm = &init_mm;
-	for(addr = start; addr < end;){
+	for (addr = start; addr < end;) {
 		pgd = pgd_offset(mm, addr);
-		if(!pgd_present(*pgd)){
+		if (!pgd_present(*pgd)) {
 			last = ADD_ROUND(addr, PGDIR_SIZE);
-			if(last > end)
+			if (last > end)
 				last = end;
-			if(pgd_newpage(*pgd)){
+			if (pgd_newpage(*pgd)) {
 				updated = 1;
 				err = os_unmap_memory((void *) addr,
 						      last - addr);
-				if(err < 0)
+				if (err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
 			}
@@ -286,15 +284,15 @@
 		}
 
 		pud = pud_offset(pgd, addr);
-		if(!pud_present(*pud)){
+		if (!pud_present(*pud)) {
 			last = ADD_ROUND(addr, PUD_SIZE);
-			if(last > end)
+			if (last > end)
 				last = end;
-			if(pud_newpage(*pud)){
+			if (pud_newpage(*pud)) {
 				updated = 1;
 				err = os_unmap_memory((void *) addr,
 						      last - addr);
-				if(err < 0)
+				if (err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
 			}
@@ -303,15 +301,15 @@
 		}
 
 		pmd = pmd_offset(pud, addr);
-		if(!pmd_present(*pmd)){
+		if (!pmd_present(*pmd)) {
 			last = ADD_ROUND(addr, PMD_SIZE);
-			if(last > end)
+			if (last > end)
 				last = end;
-			if(pmd_newpage(*pmd)){
+			if (pmd_newpage(*pmd)) {
 				updated = 1;
 				err = os_unmap_memory((void *) addr,
 						      last - addr);
-				if(err < 0)
+				if (err < 0)
 					panic("munmap failed, errno = %d\n",
 					      -err);
 			}
@@ -320,25 +318,25 @@
 		}
 
 		pte = pte_offset_kernel(pmd, addr);
-		if(!pte_present(*pte) || pte_newpage(*pte)){
+		if (!pte_present(*pte) || pte_newpage(*pte)) {
 			updated = 1;
 			err = os_unmap_memory((void *) addr,
 					      PAGE_SIZE);
-			if(err < 0)
+			if (err < 0)
 				panic("munmap failed, errno = %d\n",
 				      -err);
-			if(pte_present(*pte))
+			if (pte_present(*pte))
 				map_memory(addr,
 					   pte_val(*pte) & PAGE_MASK,
 					   PAGE_SIZE, 1, 1, 1);
 		}
-		else if(pte_newprot(*pte)){
+		else if (pte_newprot(*pte)) {
 			updated = 1;
 			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
 		}
 		addr += PAGE_SIZE;
 	}
-	return(updated);
+	return updated;
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
@@ -354,15 +352,15 @@
 
 	address &= PAGE_MASK;
 	pgd = pgd_offset(mm, address);
-	if(!pgd_present(*pgd))
+	if (!pgd_present(*pgd))
 		goto kill;
 
 	pud = pud_offset(pgd, address);
-	if(!pud_present(*pud))
+	if (!pud_present(*pud))
 		goto kill;
 
 	pmd = pmd_offset(pud, address);
-	if(!pmd_present(*pmd))
+	if (!pmd_present(*pmd))
 		goto kill;
 
 	pte = pte_offset_kernel(pmd, address);
@@ -380,8 +378,8 @@
 	mm_id = &mm->context.skas.id;
 	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
 		(x ? UM_PROT_EXEC : 0));
-	if(pte_newpage(*pte)){
-		if(pte_present(*pte)){
+	if (pte_newpage(*pte)) {
+		if (pte_present(*pte)) {
 			unsigned long long offset;
 			int fd;
 
@@ -391,10 +389,10 @@
 		}
 		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
 	}
-	else if(pte_newprot(*pte))
+	else if (pte_newprot(*pte))
 		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
 
-	if(err)
+	if (err)
 		goto kill;
 
 	*pte = pte_mkuptodate(*pte);
@@ -402,28 +400,28 @@
 	return;
 
 kill:
-	printk("Failed to flush page for address 0x%lx\n", address);
+	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
 	force_sig(SIGKILL, current);
 }
 
 pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
 {
-	return(pgd_offset(mm, address));
+	return pgd_offset(mm, address);
 }
 
 pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
 {
-	return(pud_offset(pgd, address));
+	return pud_offset(pgd, address);
 }
 
 pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
 {
-	return(pmd_offset(pud, address));
+	return pmd_offset(pud, address);
 }
 
 pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
 {
-	return(pte_offset_kernel(pmd, address));
+	return pte_offset_kernel(pmd, address);
 }
 
 pte_t *addr_pte(struct task_struct *task, unsigned long addr)
@@ -432,7 +430,7 @@
 	pud_t *pud = pud_offset(pgd, addr);
 	pmd_t *pmd = pmd_offset(pud, addr);
 
-	return(pte_offset_map(pmd, addr));
+	return pte_offset_map(pmd, addr);
 }
 
 void flush_tlb_all(void)
@@ -452,18 +450,18 @@
 
 void __flush_tlb_one(unsigned long addr)
 {
-        flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
+	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
 }
 
 static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
 		  int finished, void **flush)
 {
 	struct host_vm_op *op;
-        int i, ret = 0;
+	int i, ret = 0;
 
-        for(i = 0; i <= last && !ret; i++){
-		op = &ops[i];
-		switch(op->type){
+	for (i = 0; i <= last && !ret; i++) {
+		op = &ops[i];
+		switch (op->type) {
 		case MMAP:
 			ret = map(&mmu->skas.id, op->u.mmap.addr,
 				  op->u.mmap.len, op->u.mmap.prot,
@@ -480,7 +478,8 @@
 				      finished, flush);
 			break;
 		default:
-			printk("Unknown op type %d in do_ops\n", op->type);
+			printk(KERN_ERR "Unknown op type %d in do_ops\n",
+			       op->type);
 			break;
 		}
 	}
@@ -491,32 +490,33 @@
 static void fix_range(struct mm_struct *mm, unsigned long start_addr,
 		      unsigned long end_addr, int force)
 {
-        if(!proc_mm && (end_addr > CONFIG_STUB_START))
-                end_addr = CONFIG_STUB_START;
+	if (!proc_mm && (end_addr > CONFIG_STUB_START))
+		end_addr = CONFIG_STUB_START;
 
-        fix_range_common(mm, start_addr, end_addr, force, do_ops);
+	fix_range_common(mm, start_addr, end_addr, force, do_ops);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
-        if(vma->vm_mm == NULL)
-                flush_tlb_kernel_range_common(start, end);
-        else fix_range(vma->vm_mm, start, end, 0);
+	if (vma->vm_mm == NULL)
+		flush_tlb_kernel_range_common(start, end);
+	else fix_range(vma->vm_mm, start, end, 0);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	unsigned long end;
 
-	/* Don't bother flushing if this address space is about to be
-         * destroyed.
-         */
-        if(atomic_read(&mm->mm_users) == 0)
-                return;
+	/*
+	 * Don't bother flushing if this address space is about to be
+	 * destroyed.
+	 */
+	if (atomic_read(&mm->mm_users) == 0)
+		return;
 
 	end = proc_mm ? task_size : CONFIG_STUB_START;
-        fix_range(mm, 0, end, 0);
+	fix_range(mm, 0, end, 0);
 }
 
 void force_flush_all(void)
@@ -524,7 +524,7 @@
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma = mm->mmap;
 
-	while(vma != NULL) {
+	while (vma != NULL) {
 		fix_range(mm, vma->vm_start, vma->vm_end, 1);
 		vma = vma->vm_next;
 	}