/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright 2007 IBM Corp.
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>

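/*
 * Write a huge pte. With hardware large-page support (MACHINE_HAS_HPAGE)
 * the value goes directly into the segment-table (pmd) entry. Without it,
 * the huge page is emulated: the pmd entry is pointed at the page table
 * that arch_prepare_hugepage() stored in page[1].index, carrying over the
 * invalid/read-only bits from the original pte value. A shadow entry is
 * written as well when the mm uses no-exec shadow page tables.
 */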
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *pteptr, pte_t pteval)
{
	pmd_t *pmdp = (pmd_t *) pteptr;
	pte_t shadow_pteval = pteval;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		pteptr = (pte_t *) pte_page(pteval)[1].index;
		mask = pte_val(pteval) &
			(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
		if (mm->context.noexec) {
			pteptr += PTRS_PER_PTE;
			pte_val(shadow_pteval) =
				(_SEGMENT_ENTRY + __pa(pteptr)) | mask;
		}
	}

	pmd_val(*pmdp) = pte_val(pteval);
	if (mm->context.noexec) {
		pmdp = get_shadow_table(pmdp);
		pmd_val(*pmdp) = pte_val(shadow_pteval);
	}
}

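/*
 * Prepare a huge page for the software emulation case: allocate a page
 * table mapping every 4K page of the huge page read/write and remember
 * it in page[1].index so set_huge_pte_at() can link it into the pmd.
 * Nothing to do when the machine supports large pages in hardware.
 */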
int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	pte = mk_pte(page, PAGE_RW);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}
	page[1].index = (unsigned long) ptep;
	return 0;
}

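/*
 * Release the emulation page table attached to a huge page, if any.
 */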
void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	pte_free(&init_mm, ptep);
	page[1].index = 0;
}

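/*
 * A huge pte lives in the segment table, so allocating one means
 * allocating page tables down to the pmd level and returning the pmd
 * entry cast to a pte pointer.
 */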
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (pudp)
		pmdp = pmd_alloc(mm, pudp, addr);
	return (pte_t *) pmdp;
}

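/*
 * Look up the pmd entry for a huge page mapping without allocating.
 */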
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		pudp = pud_offset(pgdp, addr);
		if (pud_present(*pudp))
			pmdp = pmd_offset(pudp, addr);
	}
	return (pte_t *) pmdp;
}

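/* Huge pmd sharing is not implemented on s390. */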
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

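/* Resolving a huge page directly from the address is not supported. */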
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

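/*
 * A pmd maps a huge page only when hardware large pages are in use and
 * the segment entry has the large-page bit set; the software emulation
 * uses ordinary page tables and is not reported here.
 */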
int pmd_huge(pmd_t pmd)
{
	if (!MACHINE_HAS_HPAGE)
		return 0;

	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}

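/* There are no huge pud mappings on s390. */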
int pud_huge(pud_t pud)
{
	return 0;
}

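/*
 * Return the struct page for the 4K subpage at @address within a
 * hardware large-page (segment) mapping.
 */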
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmdp, int write)
{
	struct page *page;

	if (!MACHINE_HAS_HPAGE)
		return NULL;

	page = pmd_page(*pmdp);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}