x86: clean up max_pfn_mapped usage - 32-bit
On 32-bit, head_32.S sets an initial max_pfn_mapped once the initial page
tables are built; kernel_physical_mapping_init() later updates it to its
final value.
We need to use this limit so that find_e820_area() only returns addresses
that are already mapped, both for the bootmem map and for NODE_DATA(0) on
32-bit NUMA.
Xen PV and lguest may need to set max_pfn_mapped as well.
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
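For reference, a minimal userspace sketch of the constraint this patch
enforces: early allocations must be searched only below max_pfn_mapped,
because the caller touches the result through the direct mapping. The
helper names early_find_area() and range_is_free_ram() and the mapped
limit are hypothetical stand-ins for find_e820_area() and its e820 walk,
not kernel code.

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	/*
	 * Highest pfn covered by the early direct mapping.  In the patch,
	 * head_32.S stores (end of initial page tables >> PAGE_SHIFT) here.
	 * The value below is made up for the example (32 MB mapped).
	 */
	static unsigned long max_pfn_mapped = 0x2000;

	/* Stand-in for the e820 check: pretend every range is free RAM. */
	static int range_is_free_ram(unsigned long start, unsigned long size)
	{
		(void)start;
		(void)size;
		return 1;
	}

	/*
	 * Search [start, end) for 'size' bytes of free RAM, clamping 'end'
	 * so the result always lies inside the already-mapped region; memory
	 * above max_pfn_mapped cannot be written through the direct mapping
	 * yet, so handing it out would fault.
	 */
	static unsigned long early_find_area(unsigned long start,
					     unsigned long end,
					     unsigned long size)
	{
		unsigned long limit = max_pfn_mapped << PAGE_SHIFT;
		unsigned long addr;

		if (end > limit)
			end = limit;

		for (addr = start; addr + size <= end; addr += PAGE_SIZE)
			if (range_is_free_ram(addr, size))
				return addr;

		return -1UL;	/* mirrors find_e820_area()'s failure value */
	}

	int main(void)
	{
		/* A bootmem-bitmap-sized request lands below the mapped limit. */
		unsigned long bootmap =
			early_find_area(PAGE_SIZE, ~0UL, 4 * PAGE_SIZE);

		printf("bootmap at %#lx (mapped limit %#lx)\n",
		       bootmap, max_pfn_mapped << PAGE_SHIFT);
		return 0;
	}

This is why setup_32.c and discontig_32.c below pass max_pfn_mapped
(rather than max_low_pfn) as the upper bound for the early allocations
they must dereference immediately.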
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index bef4618..ac7002f 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -220,6 +220,8 @@
jb 10b
1:
movl %edi,pa(init_pg_tables_end)
+ shrl $12, %eax
+ movl %eax, pa(max_pfn_mapped)
/* Do early initialization of the fixmap area */
movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax
@@ -251,6 +253,8 @@
cmpl %ebp,%eax
jb 10b
movl %edi,pa(init_pg_tables_end)
+ shrl $12, %eax
+ movl %eax, pa(max_pfn_mapped)
/* Do early initialization of the fixmap area */
movl $pa(swapper_pg_fixmap)+PDE_ATTR,%eax
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 2901042..c985a8c 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -586,7 +586,7 @@
*/
bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
- max_low_pfn<<PAGE_SHIFT, bootmap_size,
+ max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
PAGE_SIZE);
if (bootmap == -1L)
panic("Cannot find bootmem map of size %ld\n", bootmap_size);
@@ -595,6 +595,8 @@
reserve_initrd();
#endif
bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, max_low_pfn);
+ printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
+ max_pfn_mapped<<PAGE_SHIFT);
printk(KERN_INFO " low ram: %08lx - %08lx\n",
min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
printk(KERN_INFO " bootmap %08lx - %08lx\n",
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 73a9834..914a81e 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -163,7 +163,8 @@
else {
unsigned long pgdat_phys;
pgdat_phys = find_e820_area(min_low_pfn<<PAGE_SHIFT,
- max_low_pfn<<PAGE_SHIFT, sizeof(pg_data_t),
+ (nid ? max_low_pfn:max_pfn_mapped)<<PAGE_SHIFT,
+ sizeof(pg_data_t),
PAGE_SIZE);
NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT));
reserve_early(pgdat_phys, pgdat_phys + sizeof(pg_data_t),