blob: 911d7e0b3cd9c0db77504e9ccc6606bba8da51e7 [file] [log] [blame]
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H

#include <linux/const.h>	/* _AC(): literal usable from both C and asm */

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))

/*
 * Mask of the physical-address bits of a pte.  Expands at use time, so
 * it is fine that __PHYSICAL_MASK is only defined further down.
 */
#define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
#define PTE_MASK PHYSICAL_PAGE_MASK

/*
 * Large/huge pages are one PMD entry wide.  PMD_SHIFT is provided by
 * the pagetable headers included later (page_32.h/page_64.h chain) —
 * these macros are only usable after it is in scope.
 */
#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))

#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)

/*
 * All-ones masks covering the valid physical/virtual address bits.
 * The *_SHIFT values are set per-width in the sections below.
 */
#define __PHYSICAL_MASK ((_AC(1,UL) << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)

#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif
Jeremy Fitzhardinge83a51012008-01-30 13:32:41 +010031
#ifdef CONFIG_X86_64
/* 64-bit uses the full 4-level pagetable layout. */
#define PAGETABLE_LEVELS 4

/* Kernel thread stack: 2 pages (order 1). */
#define THREAD_ORDER 1
#define THREAD_SIZE  (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE-1))

/* Per-CPU exception stacks; DEBUG gets a double-sized one. */
#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)

#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)

#define IRQSTACK_ORDER 2
#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)

/* IST indices (1-based) for the exception stacks above. */
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */

/* Start of the direct mapping of all physical memory. */
#define __PAGE_OFFSET _AC(0xffff810000000000, UL)

#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000

/*
 * Make sure kernel is aligned to 2MB address. Catching it at compile
 * time is better. Change your config file and compile the kernel
 * for a 2MB aligned address (CONFIG_PHYSICAL_START)
 */
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif

#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)

/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
#define __VIRTUAL_MASK_SHIFT 48

#define KERNEL_TEXT_SIZE (40*1024*1024)
#define KERNEL_TEXT_START _AC(0xffffffff80000000, UL)

#ifndef __ASSEMBLY__
/* Implemented in arch assembly; prototypes only here. */
void clear_page(void *page);
void copy_page(void *to, void *from);
#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_X86_64 */
85
#ifdef CONFIG_X86_32

/*
 * This handles the memory map.
 *
 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 950MB.
 *
 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
 * and CONFIG_HIGHMEM64G options in the kernel configuration.
 */
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)

#ifdef CONFIG_X86_PAE
/* PAE: 36 physical address bits, still a 32-bit virtual space. */
#define __PHYSICAL_MASK_SHIFT 36
#define __VIRTUAL_MASK_SHIFT 32
#else /* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT 32
#define __VIRTUAL_MASK_SHIFT 32
#endif /* CONFIG_X86_PAE */

#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

#ifndef __ASSEMBLY__
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>

/* 3DNow!-accelerated page clear/copy; mmx_* come from <asm/mmx.h>. */
static inline void clear_page(void *page)
{
	mmx_clear_page(page);
}

static inline void copy_page(void *to, void *from)
{
	mmx_copy_page(to, from);
}
#else /* !CONFIG_X86_USE_3DNOW */
#include <linux/string.h>

/* Generic fallback: plain memset/memcpy of one PAGE_SIZE page. */
static inline void clear_page(void *page)
{
	memset(page, 0, PAGE_SIZE);
}

static inline void copy_page(void *to, void *from)
{
	memcpy(to, from, PAGE_SIZE);
}
#endif /* CONFIG_X86_USE_3DNOW */
#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_X86_32 */
141
/* C-typed form of the kernel mapping base chosen per-width above. */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)

/*
 * Default vm_flags for data mappings: read/write (and the MAY* rights),
 * plus VM_EXEC when the task's personality requests READ_IMPLIES_EXEC.
 */
#define VM_DATA_DEFAULT_FLAGS \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
148
#ifndef __ASSEMBLY__
struct page;

/*
 * Clear a page that will be mapped into user space at @vaddr (struct
 * page @pg).  x86 has no cache-aliasing issues, so both hints are
 * unused and this is just clear_page().
 *
 * Note: specifier order fixed to the conventional "static inline void"
 * (was "static void inline"), matching clear_page()/copy_page() above.
 */
static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}

/*
 * Copy a page destined for user space.  As with clear_user_page(), the
 * @vaddr/@topage arguments exist for architectures that must handle
 * virtual aliasing; x86 does not, so this is plain copy_page().
 */
static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *topage)
{
	copy_page(to, from);
}

/* Allocate an already-zeroed highmem page for a user mapping. */
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

#endif /* __ASSEMBLY__ */
169

/* Pull in the remaining width-specific definitions. */
#ifdef CONFIG_X86_32
# include "page_32.h"
#else
# include "page_64.h"
#endif

#endif /* _ASM_X86_PAGE_H */
177#endif /* _ASM_X86_PAGE_H */