uml: 64-bit tlb fixes
Some 64-bit tlb fixes:
 - moved pmd_page_vaddr to pgtable.h, since it is the same for both
   2-level and 3-level page tables
 - fixed a bogus cast on pud_page_vaddr
 - made the address checking in the update_*_range loops more careful,
   comparing addr < end instead of addr != end so the walk cannot step
   past a misaligned end (see the sketch below)
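
To see why the last change matters: at the pte level the walk advances
by PAGE_SIZE, so with an addr != end test an end that is not
page-aligned relative to addr would never compare equal and the loop
would run off the end of the range.  A minimal standalone sketch of the
hazard (illustrative only, not part of the patch; the PAGE_SIZE value
and the names are made up):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		unsigned long addr = 0;
		unsigned long end = PAGE_SIZE + 123;	/* not page-aligned */
		int iterations = 0;

		do {
			iterations++;		/* stand-in for per-pte work */
		} while (addr += PAGE_SIZE, addr < end);

		/* stops after 2 iterations; with addr != end, addr would
		 * step from 4096 straight to 8192, never equalling 4219,
		 * and the loop would never terminate */
		printf("stopped after %d iterations, addr = %lu\n",
		       iterations, addr);
		return 0;
	}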
Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index ef5a2a2..8127ca8 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -207,7 +207,7 @@
else if (pte_newprot(*pte))
ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
*pte = pte_mkuptodate(*pte);
- } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
+ } while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
return ret;
}
@@ -229,7 +229,7 @@
}
}
else ret = update_pte_range(pmd, addr, next, hvc);
- } while (pmd++, addr = next, ((addr != end) && !ret));
+ } while (pmd++, addr = next, ((addr < end) && !ret));
return ret;
}
@@ -251,7 +251,7 @@
}
}
else ret = update_pmd_range(pud, addr, next, hvc);
- } while (pud++, addr = next, ((addr != end) && !ret));
+ } while (pud++, addr = next, ((addr < end) && !ret));
return ret;
}
@@ -274,7 +274,7 @@
}
}
else ret = update_pud_range(pgd, addr, next, &hvc);
- } while (pgd++, addr = next, ((addr != end_addr) && !ret));
+ } while (pgd++, addr = next, ((addr < end_addr) && !ret));
if (!ret)
ret = do_ops(&hvc, hvc.index, 1);
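
At the pmd, pud, and pgd levels the walk advances with addr = next,
where next comes from the pmd_addr_end()/pud_addr_end()/pgd_addr_end()
helpers, which clamp the next boundary to end - so addr == end does
hold on the final pass and addr != end did normally terminate there.
Switching those loops to addr < end as well makes the exit condition
robust even if a caller ever hands in an end below addr.  A sketch of
the clamping, loosely modeled on the kernel's pmd_addr_end() (the
PMD_SHIFT value here is illustrative, and the real macro biases both
sides of the comparison by -1 so an end that wraps to 0 is still
handled):

	#define PMD_SHIFT	21
	#define PMD_SIZE	(1UL << PMD_SHIFT)
	#define PMD_MASK	(~(PMD_SIZE - 1))

	static unsigned long pmd_addr_end_sketch(unsigned long addr,
						 unsigned long end)
	{
		unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

		/* never return past end; the walker then does addr = next */
		return boundary < end ? boundary : end;
	}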