#ifndef _LINUX_HUGETLB_H
2#define _LINUX_HUGETLB_H
3
4#include <linux/mm_types.h>
5#include <linux/fs.h>
6#include <linux/hugetlb_inline.h>
7
8struct ctl_table;
9struct user_struct;
10
11#ifdef CONFIG_HUGETLB_PAGE
12
13#include <linux/mempolicy.h>
14#include <linux/shm.h>
15#include <asm/tlbflush.h>
16
/*
 * Per-hugetlbfs-mount pool that accounts the huge pages a mount may
 * consume (see hugepage_new_subpool()/hugepage_put_subpool() below).
 */
struct hugepage_subpool {
	spinlock_t lock;		/* protects the counters below */
	long count;			/* reference count on the pool */
	long max_hpages, used_hpages;	/* limit / current usage, in huge pages
					 * (per the names; semantics defined at
					 * the implementation — confirm there) */
};
22
/* Sub-pool creation and teardown. */
struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);

/* Non-zero if @page is a huge page (head or tail). */
int PageHuge(struct page *page);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
/* sysctl handlers for the hugetlb tunables. */
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
#endif

/* Core huge-page fault / map / unmap entry points. */
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			struct page **, struct vm_area_struct **,
			unsigned long *, int *, int, unsigned int flags);
void unmap_hugepage_range(struct vm_area_struct *,
			unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range(struct vm_area_struct *,
			unsigned long, unsigned long, struct page *);
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
/* /proc/meminfo and per-node meminfo reporting. */
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
/* Reservation accounting for hugetlbfs-backed mappings. */
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			struct vm_area_struct *vma,
			vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
void copy_huge_page(struct page *dst, struct page *src);

extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
63
64
65pte_t *huge_pte_alloc(struct mm_struct *mm,
66 unsigned long addr, unsigned long sz);
67pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
68int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
69struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
70 int write);
71struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
72 pmd_t *pmd, int write);
73struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
74 pud_t *pud, int write);
75int pmd_huge(pmd_t pmd);
76int pud_huge(pud_t pmd);
77void hugetlb_change_protection(struct vm_area_struct *vma,
78 unsigned long address, unsigned long end, pgprot_t newprot);
79
#else	/* !CONFIG_HUGETLB_PAGE */

/*
 * Stubs for kernels built without huge page support.  Queries return a
 * benign "no huge pages" value; paths that should be unreachable in
 * such a configuration are wired to BUG().
 */
static inline int PageHuge(struct page *page)
{
	return 0;
}

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
#define unmap_hugepage_range(vma, start, end, page)	BUG()
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define follow_huge_pud(mm, addr, pud, write)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
#define dequeue_hwpoisoned_huge_page(page)	0
static inline void copy_huge_page(struct page *dst, struct page *src)
{
}

/* Expands to nothing: no protection changes without huge page support. */
#define hugetlb_change_protection(vma, address, end, newprot)

#endif	/* !CONFIG_HUGETLB_PAGE */
122
/* Name used for anonymous (MAP_HUGETLB-style) hugetlbfs files. */
#define HUGETLB_ANON_FILE	"anon_hugepage"

/* Inode kinds (per the names: SysV-shm-backed vs. anonymous hugepage). */
enum {
	HUGETLB_SHMFS_INODE = 1,
	HUGETLB_ANONHUGE_INODE = 2,
};
129
130#ifdef CONFIG_HUGETLBFS
/*
 * hugetlbfs superblock private data, hung off sb->s_fs_info
 * (see HUGETLBFS_SB() below).
 */
struct hugetlbfs_sb_info {
	long max_inodes;	/* inode limit for this mount (per name) */
	long free_inodes;	/* inodes still available (per name) */
	spinlock_t stat_lock;	/* presumably guards the inode counters */
	struct hstate *hstate;	/* huge page size backing this mount */
	struct hugepage_subpool *spool;	/* per-mount page accounting pool */
};
138
139static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
140{
141 return sb->s_fs_info;
142}
143
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
/* Create a hugetlbfs-backed struct file of the given name and size. */
struct file *hugetlb_file_setup(const char *name, unsigned long addr,
				size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags);
149
150static inline int is_file_hugepages(struct file *file)
151{
152 if (file->f_op == &hugetlbfs_file_operations)
153 return 1;
154 if (is_file_shm_hugepages(file))
155 return 1;
156
157 return 0;
158}
159
#else	/* !CONFIG_HUGETLBFS */

/* Without hugetlbfs no file can be huge-page backed. */
#define is_file_hugepages(file)			0
static inline struct file *
hugetlb_file_setup(const char *name, unsigned long addr, size_t size,
		vm_flags_t acctflag, struct user_struct **user, int creat_flags)
{
	/* Creating a hugetlbfs file is impossible in this configuration. */
	return ERR_PTR(-ENOSYS);
}

#endif	/* !CONFIG_HUGETLBFS */
171
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
/* Arch-specific placement of huge-page mappings (get_unmapped_area hook). */
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif
177
178#ifdef CONFIG_HUGETLB_PAGE
179
#define HSTATE_NAME_LEN 32
/* One hstate describes one supported huge page size and its page pools. */
struct hstate {
	int next_nid_to_alloc;	/* NUMA node cursors (per names; presumably */
	int next_nid_to_free;	/* round-robin — confirm in mm/hugetlb.c)   */
	unsigned int order;	/* huge page size == PAGE_SIZE << order
				 * (see huge_page_size() below) */
	unsigned long mask;	/* address mask, returned by huge_page_mask() */
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	/* Per-node counters mirroring the global totals above. */
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
	char name[HSTATE_NAME_LEN];	/* human-readable size name */
};
198
/*
 * Bookkeeping for huge pages handed out from bootmem at early boot
 * (see alloc_bootmem_huge_page() below); linked on huge_boot_pages.
 */
struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;	/* physical address; with HIGHMEM the page may
				 * lack a permanent mapping — confirm at use */
#endif
};
206
/* Allocate a huge page of size @h, preferring NUMA node @nid. */
struct page *alloc_huge_page_node(struct hstate *h, int nid);

/* Boot-time only: carve one huge page out of bootmem for @h. */
int __init alloc_bootmem_huge_page(struct hstate *h);

/* Register a huge page size of (PAGE_SIZE << order) bytes. */
void __init hugetlb_add_hstate(unsigned order);
/* Look up the hstate whose huge page size is exactly @size bytes. */
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

/* The hstate used when no explicit size is requested. */
#define default_hstate (hstates[default_hstate_idx])
222
223static inline struct hstate *hstate_inode(struct inode *i)
224{
225 struct hugetlbfs_sb_info *hsb;
226 hsb = HUGETLBFS_SB(i->i_sb);
227 return hsb->hstate;
228}
229
230static inline struct hstate *hstate_file(struct file *f)
231{
232 return hstate_inode(f->f_dentry->d_inode);
233}
234
235static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
236{
237 return hstate_file(vma->vm_file);
238}
239
240static inline unsigned long huge_page_size(struct hstate *h)
241{
242 return (unsigned long)PAGE_SIZE << h->order;
243}
244
/* Page size (bytes) of the mapping backing @vma; two variants. */
extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
248
249static inline unsigned long huge_page_mask(struct hstate *h)
250{
251 return h->mask;
252}
253
254static inline unsigned int huge_page_order(struct hstate *h)
255{
256 return h->order;
257}
258
259static inline unsigned huge_page_shift(struct hstate *h)
260{
261 return h->order + PAGE_SHIFT;
262}
263
/* Number of base pages making up one huge page of this hstate. */
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << huge_page_order(h);
}
268
/* Number of 512-byte blocks per huge page of this hstate. */
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) >> 9;	/* 512 == 1 << 9 */
}
273
274#include <asm/hugetlb.h>
275
276static inline struct hstate *page_hstate(struct page *page)
277{
278 return size_to_hstate(PAGE_SIZE << compound_order(page));
279}
280
281static inline unsigned hstate_index_to_shift(unsigned index)
282{
283 return hstates[index].order + PAGE_SHIFT;
284}
285
#else	/* !CONFIG_HUGETLB_PAGE */
/* Degenerate hstate and accessors: every page is a base page. */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#endif	/* !CONFIG_HUGETLB_PAGE */
305
306#endif