[PATCH] zoned vm counters: conversion of nr_pagetables to per zone counter
Conversion of nr_page_table_pages to a per-zone counter (NR_PAGETABLE): the
decrement sites below switch from dec_page_state(nr_page_table_pages) to
dec_zone_page_state(page, NR_PAGETABLE), and readers report
global_page_state(NR_PAGETABLE) instead of ps.nr_page_table_pages.
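For reviewers, a minimal sketch of the accounting idiom under the per-zone
API.  The account_pagetable_page() and pagetable_pages() helpers are made-up
names for illustration only and are not added by this patch;
inc_zone_page_state() is simply the increment-side counterpart of the
dec_zone_page_state() calls converted below:

	#include <linux/mm.h>
	#include <linux/vmstat.h>

	/*
	 * Hypothetical helper, illustration only: charge or uncharge a page
	 * used as a page table against its zone's NR_PAGETABLE counter.
	 */
	static inline void account_pagetable_page(struct page *page, int alloc)
	{
		if (alloc)
			inc_zone_page_state(page, NR_PAGETABLE);
		else
			dec_zone_page_state(page, NR_PAGETABLE);
	}

	/*
	 * Hypothetical reader, illustration only: sums the per-zone counters
	 * in place of the old ps.nr_page_table_pages lookup.
	 */
	static inline unsigned long pagetable_pages(void)
	{
		return global_page_state(NR_PAGETABLE);
	}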
[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 95273de..931be17 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -227,7 +227,7 @@
 
 	pte = pmd_page(*pmd);
 	pmd_clear(pmd);
-	dec_page_state(nr_page_table_pages);
+	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
 	pte_lock_deinit(pte);
 	pte_free(pte);
 	pmd_free(pmd);
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 73ac359..0bb1e5c 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -63,7 +63,8 @@
 	printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
 	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
 	printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
-	printk(KERN_INFO "%lu pages pagetables\n", ps.nr_page_table_pages);
+	printk(KERN_INFO "%lu pages pagetables\n",
+		global_page_state(NR_PAGETABLE));
 }
 
 /*
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index c5c9885..624ca23 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -152,7 +152,7 @@
 	free_page(mmu->id.stack);
 	pte_lock_deinit(virt_to_page(mmu->last_page_table));
 	pte_free_kernel((pte_t *) mmu->last_page_table);
-	dec_page_state(nr_page_table_pages);
+	dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
 #ifdef CONFIG_3_LEVEL_PGTABLES
 	pmd_free((pmd_t *) mmu->last_pmd);
 #endif