x86: simplify 32-bit cpa largepage splitting

Simplify 32-bit cpa large page splitting: split_large_page() now does a
pure split (every new pte gets the unmodified reference protection), and
the caller repeats the pte lookup afterwards so the freshly created 4k
pte for the target address gets the new protection applied.
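
For illustration, a minimal user-space sketch of the same split-and-repeat
pattern; struct entry, change_attr() and the other names are made-up
stand-ins for this sketch, not the kernel code or its APIs:

#include <stdio.h>
#include <stdlib.h>

#define SLOTS 8                         /* 4k slots behind one large entry */

struct entry {
        int large;                      /* one mapping covering all SLOTS  */
        int prot;                       /* its protection attribute        */
        int *small;                     /* per-slot attributes after split */
};

/* Pure split: every new small entry inherits the unmodified ref_prot. */
static int split_large(struct entry *e, int ref_prot)
{
        int i;

        e->small = malloc(SLOTS * sizeof(*e->small));
        if (!e->small)
                return -1;
        for (i = 0; i < SLOTS; i++)
                e->small[i] = ref_prot;
        e->large = 0;
        return 0;
}

/* Change one slot's attribute, splitting a large entry first if needed. */
static int change_attr(struct entry *e, int slot, int prot)
{
repeat:
        if (!e->large) {
                e->small[slot] = prot;  /* small entry: just modify it    */
                return 0;
        }
        if (split_large(e, e->prot))    /* large: pure split, old prot    */
                return -1;
        goto repeat;                    /* re-check now finds small entry */
}

int main(void)
{
        struct entry e = { .large = 1, .prot = 1, .small = NULL };

        change_attr(&e, 3, 7);
        printf("slot 3: %d, slot 4: %d\n", e.small[3], e.small[4]);
        free(e.small);
        return 0;
}

The point the sketch makes is the same as the patch: the split itself never
applies the new protection; only the retried lookup, which now finds a small
entry, modifies the one affected slot.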

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index 570a37b..1011b21 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -38,7 +38,7 @@
 }
 
 static struct page *
-split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
+split_large_page(unsigned long address, pgprot_t ref_prot)
 {
 	unsigned long addr;
 	struct page *base;
@@ -58,10 +58,9 @@
 	pbase = (pte_t *)page_address(base);
 	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
 
-	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
-		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
-					   addr == address ? prot : ref_prot));
-	}
+	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
+		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));
+
 	return base;
 }
 
 
@@ -101,6 +100,7 @@
 	BUG_ON(PageHighMem(page));
 	address = (unsigned long)page_address(page);
 
+repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
 		return -EINVAL;
@@ -128,7 +128,8 @@
 		set_pte_atomic(kpte, mk_pte(page, prot));
 	} else {
 		struct page *split;
-		split = split_large_page(address, prot, ref_prot);
+
+		split = split_large_page(address, ref_prot);
 		if (!split)
 			return -ENOMEM;
 
@@ -136,6 +137,7 @@
 		 * There's a small window here to waste a bit of RAM:
 		 */
 		set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
+		goto repeat;
 	}
 	return 0;
 }