#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

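/*
 * Huge pmd entry points, implemented in mm/huge_memory.c and called
 * from the generic mm code: anonymous huge page faults, fork-time
 * copying, write-protect (COW) faults, follow_page() lookups and
 * teardown at unmap time.
 */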
extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm);
extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd);
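/* Huge pmd flavours of the mincore() and mprotect() page table walks. */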
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot);

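/*
 * Bit numbers for transparent_hugepage_flags; the enabled and defrag
 * bits are controlled through /sys/kernel/mm/transparent_hugepage/.
 */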
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

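/*
 * page_check_address_pmd() is the rmap helper that finds the pmd
 * mapping @page in @mm at @address; the flag says whether the caller
 * expects the pmd to be marked splitting, not splitting, or either.
 */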
enum page_check_address_pmd_flag {
	PAGE_CHECK_ADDRESS_PMD_FLAG,
	PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
	PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
};
extern pmd_t *page_check_address_pmd(struct page *page,
				     struct mm_struct *mm,
				     unsigned long address,
				     enum page_check_address_pmd_flag flag);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT HPAGE_SHIFT
#define HPAGE_PMD_MASK HPAGE_MASK
#define HPAGE_PMD_SIZE HPAGE_SIZE

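/*
 * A vma may use huge pages when THP is globally "always", or is set
 * to "madvise" and the vma is marked VM_HUGEPAGE; VM_NOHUGEPAGE vmas
 * and temporary stacks are always excluded.
 * transparent_hugepage_defrag() answers the same question for the
 * defrag setting.
 */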
#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;
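/*
 * copy_pte_range() and handle_pte_fault() live in mm/memory.c; they
 * are exported through this header so the huge pmd paths can fall
 * back to the ordinary pte handling when a huge page cannot be used.
 */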
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			  pmd_t *dst_pmd, pmd_t *src_pmd,
			  struct vm_area_struct *vma,
			  unsigned long addr, unsigned long end);
extern int handle_pte_fault(struct mm_struct *mm,
			    struct vm_area_struct *vma, unsigned long address,
			    pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page(struct page *page);
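/*
 * split_huge_page_pmd() turns a huge pmd mapping back into regular
 * ptes before code that only understands pte mappings runs on the
 * range; it is a no-op if the pmd is not transparent huge.
 */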
extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
#define split_huge_page_pmd(__mm, __pmd)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (unlikely(pmd_trans_huge(*____pmd)))			\
			__split_huge_page_pmd(__mm, ____pmd);		\
	} while (0)
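/*
 * split_huge_page() holds the anon_vma lock for the duration of the
 * split, so taking and dropping that lock is enough to wait for a
 * split that is already under way; afterwards the pmd must no longer
 * be huge or splitting.
 */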
#define wait_split_huge_page(__anon_vma, __pmd)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		anon_vma_lock(__anon_vma);				\
		anon_vma_unlock(__anon_vma);				\
		BUG_ON(pmd_trans_splitting(*____pmd) ||			\
		       pmd_trans_huge(*____pmd));			\
	} while (0)
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#if HPAGE_PMD_ORDER > MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
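/* madvise(MADV_HUGEPAGE)/madvise(MADV_NOHUGEPAGE) support. */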
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
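/*
 * Called from vma_adjust(): a huge pmd straddling a new vma boundary
 * must be split first.  Only anonymous vmas can carry transparent
 * huge pages, so everything else returns early.
 */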
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
	if (!vma->anon_vma || vma->vm_ops)
		return;
	__vma_adjust_trans_huge(vma, start, end, adjust_next);
}
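/* Number of base pages backing @page: HPAGE_PMD_NR for a THP, else 1. */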
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}
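/*
 * Like compound_head(), but safe to use on a page whose compound
 * linkage may be torn down underneath us by a concurrent
 * __split_huge_page_refcount().
 */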
static inline struct page *compound_trans_head(struct page *page)
{
	if (PageTail(page)) {
		struct page *head;
		head = page->first_page;
		smp_rmb();
		/*
		 * head may be a dangling pointer.
		 * __split_huge_page_refcount clears PageTail before
		 * overwriting first_page, so if PageTail is still
		 * there it means the head pointer isn't dangling.
		 */
		if (PageTail(page))
			return head;
	}
	return page;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
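/*
 * THP compiled out: constant-fold the predicates and stub out the
 * entry points so callers need no #ifdefs of their own.
 */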
#define HPAGE_PMD_SHIFT ({ BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int split_huge_page(struct page *page)
{
	return 0;
}
#define split_huge_page_pmd(__mm, __pmd)	\
	do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd)	\
	do { } while (0)
#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */