[S390] Cleanup page table definitions.

- Disentangle the defines for the address-space-control elements
  from the defines for the segment/region table entries.
- Create out-of-line functions for page table allocation / freeing
  (a minimal sketch of the new helpers follows below).
- Simplify the get_shadow_xxx functions.
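
A minimal sketch of the new out-of-line helpers (the real bodies live in
arch/s390/mm and are not part of this patch excerpt). It simply mirrors the
open-coded logic removed from pgd_alloc()/pmd_alloc_one() below, assuming the
shadow table for the noexec emulation is kept in page->lru.next and that
get_shadow_table() returns that pointer; CRST_ALLOC_ORDER is a placeholder
name for the allocation order:

	unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
	{
		unsigned long *table, *shadow;
		struct page *page;

		table = (unsigned long *) __get_free_pages(GFP_KERNEL,
							   CRST_ALLOC_ORDER);
		if (!table)
			return NULL;
		page = virt_to_page(table);
		page->lru.next = NULL;
		if (noexec) {
			shadow = (unsigned long *)
				__get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
			if (!shadow) {
				free_pages((unsigned long) table,
					   CRST_ALLOC_ORDER);
				return NULL;
			}
			/* remember the shadow table for get_shadow_table() */
			page->lru.next = (void *) shadow;
		}
		return table;
	}

	void crst_table_free(unsigned long *table)
	{
		unsigned long *shadow = get_shadow_table(table);

		if (shadow)
			free_pages((unsigned long) shadow, CRST_ALLOC_ORDER);
		free_pages((unsigned long) table, CRST_ALLOC_ORDER);
	}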

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 6cbbfe4..229b0bd 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -19,114 +19,75 @@
 
 #define check_pgt_cache()	do {} while (0)
 
-/*
- * Page allocation orders.
- */
-#ifndef __s390x__
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	0
-# define PGD_ALLOC_ORDER	1
-#else /* __s390x__ */
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	2
-# define PGD_ALLOC_ORDER	2
-#endif /* __s390x__ */
+unsigned long *crst_table_alloc(struct mm_struct *, int);
+void crst_table_free(unsigned long *);
 
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
+unsigned long *page_table_alloc(int);
+void page_table_free(unsigned long *);
 
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
-	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-	int i;
-
-	if (!pgd)
-		return NULL;
-	if (s390_noexec) {
-		pgd_t *shadow_pgd = (pgd_t *)
-			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pgd);
-
-		if (!shadow_pgd) {
-			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pgd;
-	}
-	for (i = 0; i < PTRS_PER_PGD; i++)
-#ifndef __s390x__
-		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
+	*s = val;
+	n = (n / 256) - 1;
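+	/*
+	 * The first table entry has been stored above. The initial mvc
+	 * replicates it over the first 256 bytes, the loop then copies
+	 * 256 bytes at a time for the remaining n blocks.
+	 */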
+	asm volatile(
+#ifdef CONFIG_64BIT
+		"	mvc	8(248,%0),0(%0)\n"
 #else
-		pgd_clear(pgd + i);
+		"	mvc	4(252,%0),0(%0)\n"
 #endif
-	return pgd;
+		"0:	mvc	256(256,%0),0(%0)\n"
+		"	la	%0,256(%0)\n"
+		"	brct	%1,0b\n"
+		: "+a" (s), "+d" (n));
 }
 
-static inline void pgd_free(pgd_t *pgd)
+static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-
-	if (shadow_pgd)
-		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
-	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
+	clear_table(crst, entry, sizeof(unsigned long)*2048);
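+	/* If the noexec emulation created a shadow table, initialize it too. */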
+	crst = get_shadow_table(crst);
+	if (crst)
+		clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
 #ifndef __s390x__
-/*
- * page middle directory allocation/free routines.
- * We use pmd cache only on s390x, so these are dummy routines. This
- * code never triggers because the pgd will always be present.
- */
-#define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x)                     do { } while (0)
-#define pgd_populate(mm, pmd, pte)      BUG()
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
+{
+	return _SEGMENT_ENTRY_EMPTY;
+}
+
+#define pmd_alloc_one(mm,address)		({ BUG(); ((pmd_t *)2); })
+#define pmd_free(x)				do { } while (0)
+
+#define pgd_populate(mm, pmd, pte)		BUG()
 #define pgd_populate_kernel(mm, pmd, pte)	BUG()
+
 #else /* __s390x__ */
-static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-	pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-	int i;
-
-	if (!pmd)
-		return NULL;
-	if (s390_noexec) {
-		pmd_t *shadow_pmd = (pmd_t *)
-			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pmd);
-
-		if (!shadow_pmd) {
-			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pmd;
-	}
-	for (i=0; i < PTRS_PER_PMD; i++)
-		pmd_clear(pmd + i);
-	return pmd;
+	return _REGION3_ENTRY_EMPTY;
 }
 
-static inline void pmd_free (pmd_t *pmd)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
-
-	if (shadow_pmd)
-		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
-	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
+	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+	if (crst)
+		crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
+	return (pmd_t *) crst;
 }
+#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)
 
-static inline void
-pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void pgd_populate_kernel(struct mm_struct *mm,
+				       pgd_t *pgd, pmd_t *pmd)
 {
-	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
+	pgd_val(*pgd) = _REGION3_ENTRY | __pa(pmd);
 }
 
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pgd_t *shadow_pgd = get_shadow_table(pgd);
+	pmd_t *shadow_pmd = get_shadow_table(pmd);
 
 	if (shadow_pgd && shadow_pmd)
 		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
@@ -135,17 +96,26 @@
 
 #endif /* __s390x__ */
 
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+	if (crst)
+		crst_table_init(crst, pgd_entry_type(mm));
+	return (pgd_t *) crst;
+}
+#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
+
 static inline void 
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
 #ifndef __s390x__
-	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
-	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
-	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
-	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
+	pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
+	pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
+	pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
+	pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
 #else /* __s390x__ */
-	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
-	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
+	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
+	pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
 #endif /* __s390x__ */
 }
 
@@ -153,7 +123,7 @@
 pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 {
 	pte_t *pte = (pte_t *)page_to_phys(page);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pmd_t *shadow_pmd = get_shadow_table(pmd);
 	pte_t *shadow_pte = get_shadow_pte(pte);
 
 	pmd_populate_kernel(mm, pmd, pte);
@@ -164,57 +134,14 @@
 /*
  * page table entry allocation/free routines.
  */
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
-{
-	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	int i;
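+/*
+ * page_table_alloc()/page_table_free() work on kernel virtual addresses;
+ * using page_to_phys() here is fine since physical and kernel virtual
+ * addresses coincide on s390.
+ */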
+#define pte_alloc_one_kernel(mm, vmaddr) \
+	((pte_t *) page_table_alloc(s390_noexec))
+#define pte_alloc_one(mm, vmaddr) \
+	virt_to_page(page_table_alloc(s390_noexec))
 
-	if (!pte)
-		return NULL;
-	if (s390_noexec) {
-		pte_t *shadow_pte = (pte_t *)
-			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-		struct page *page = virt_to_page(pte);
-
-		if (!shadow_pte) {
-			free_page((unsigned long) pte);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pte;
-	}
-	for (i=0; i < PTRS_PER_PTE; i++) {
-		pte_clear(mm, vmaddr, pte + i);
-		vmaddr += PAGE_SIZE;
-	}
-	return pte;
-}
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
-{
-	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
-	if (pte)
-		return virt_to_page(pte);
-	return NULL;
-}
-
-static inline void pte_free_kernel(pte_t *pte)
-{
-	pte_t *shadow_pte = get_shadow_pte(pte);
-
-	if (shadow_pte)
-		free_page((unsigned long) shadow_pte);
-	free_page((unsigned long) pte);
-}
-
-static inline void pte_free(struct page *pte)
-{
-	struct page *shadow_page = get_shadow_page(pte);
-
-	if (shadow_page)
-		__free_page(shadow_page);
-	__free_page(pte);
-}
+#define pte_free_kernel(pte) \
+	page_table_free((unsigned long *) pte)
+#define pte_free(pte) \
+	page_table_free((unsigned long *) page_to_phys((struct page *) pte))
 
 #endif /* _S390_PGALLOC_H */