powerpc: Shield code specific to 64-bit server processors
This is a collection of added #ifdefs around portions of code that
only make sense on server processors, using either
CONFIG_PPC_STD_MMU_64 or CONFIG_PPC_BOOK3S as seems appropriate.
This is meant to make the future merging of Book3E 64-bit support
easier.
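
For reference, the pattern applied throughout is simply to bracket the
server-only (hash MMU) code with the relevant symbol, as in this sketch
mirroring the setup_64.c hunk below (illustrative only, not an
additional change):

	#ifdef CONFIG_PPC_STD_MMU_64
		/* hash-MMU (server) specific, skipped on Book3E */
		stabs_alloc();
	#endif /* CONFIG_PPC_STD_MMU_64 */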
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7d46e5d..8564a41 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -117,6 +117,7 @@
if (!en)
return;
+#ifdef CONFIG_PPC_STD_MMU_64
if (firmware_has_feature(FW_FEATURE_ISERIES)) {
/*
* Do we need to disable preemption here? Not really: in the
@@ -134,6 +135,7 @@
if (local_paca->lppaca_ptr->int_dword.any_int)
iseries_handle_interrupts();
}
+#endif /* CONFIG_PPC_STD_MMU_64 */
/*
* if (get_paca()->hard_enabled) return;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index dd6c7a3..461c916 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -420,6 +420,9 @@
* so flushing the hash table is the only sane way to make sure
* that no hash entries are covering that removed bridge area
* while still allowing other busses overlapping those pages
+ *
+ * Note: If we ever support P2P hotplug on Book3E, we'll have
+ * to do an appropriate TLB flush here too
*/
if (bus->self) {
struct resource *res = bus->resource[0];
@@ -427,8 +430,10 @@
pr_debug("IO unmapping for PCI-PCI bridge %s\n",
pci_name(bus->self));
+#ifdef CONFIG_PPC_STD_MMU_64
__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
res->end + _IO_BASE + 1);
+#endif
return 0;
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7b44a33..3e7135b 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -650,7 +650,7 @@
p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
_ALIGN_UP(sizeof(struct thread_info), 16);
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
if (cpu_has_feature(CPU_FTR_SLB)) {
unsigned long sp_vsid;
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index ce01ff2..d4405b9 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -585,7 +585,7 @@
ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
static void __init check_cpu_slb_size(unsigned long node)
{
u32 *slb_size_ptr;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index c410c60..4222105 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -417,9 +417,11 @@
if (ppc64_caches.iline_size != 0x80)
printk("ppc64_caches.icache_line_size = 0x%x\n",
ppc64_caches.iline_size);
+#ifdef CONFIG_PPC_STD_MMU_64
if (htab_address)
printk("htab_address = 0x%p\n", htab_address);
printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+#endif /* CONFIG_PPC_STD_MMU_64 */
if (PHYSICAL_START > 0)
printk("physical_start = 0x%lx\n",
PHYSICAL_START);
@@ -511,8 +513,9 @@
irqstack_early_init();
emergency_stack_init();
+#ifdef CONFIG_PPC_STD_MMU_64
stabs_alloc();
-
+#endif
/* set up the bootmem stuff with available memory */
do_init_bootmem();
sparse_init();