Merge branch 'merge'
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 7e2c9fe..821e152 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -2,6 +2,11 @@
  *  FPU support code, moved here from head.S so that it can be used
  *  by chips which use other head-whatever.S files.
  *
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ *    Copyright (C) 1996 Paul Mackerras.
+ *    Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
+ *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
  *  as published by the Free Software Foundation; either version
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index fd4ddb8..b443233 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -323,7 +323,8 @@
 
 static LIST_HEAD(irq_hosts);
 static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;
-
+static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
+static unsigned int irq_radix_writer;
+
 struct irq_map_entry irq_map[NR_IRQS];
 static unsigned int irq_virq_count = NR_IRQS;
 static struct irq_host *irq_default_host;
@@ -456,6 +457,58 @@
 		irq_virq_count = count;
 }
 
+/* The radix tree is not lockless-safe! We use a brlock-type mechanism
+ * for now, until we can use a lockless radix tree: readers merely set
+ * a per-cpu flag, while the writer takes the big lock and spins until
+ * all reader flags are clear.
+ */
+static void irq_radix_wrlock(unsigned long *flags)
+{
+	unsigned int cpu, ok;
+
+	spin_lock_irqsave(&irq_big_lock, *flags);
+	irq_radix_writer = 1;
+	smp_mb();
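+	/* wait for all readers to drain: spin until no CPU has its
+	 * per-cpu reader flag set */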
+	do {
+		barrier();
+		ok = 1;
+		for_each_possible_cpu(cpu) {
+			if (per_cpu(irq_radix_reader, cpu)) {
+				ok = 0;
+				break;
+			}
+		}
+		if (!ok)
+			cpu_relax();
+	} while (!ok);
+}
+
+static void irq_radix_wrunlock(unsigned long flags)
+{
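+	/* make the radix tree updates visible before the writer flag
+	 * is cleared */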
+	smp_wmb();
+	irq_radix_writer = 0;
+	spin_unlock_irqrestore(&irq_big_lock, flags);
+}
+
+static void irq_radix_rdlock(unsigned long *flags)
+{
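+	/* fast path: flag this CPU as a reader and check for a writer;
+	 * the smp_mb() pairs with the one in irq_radix_wrlock() */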
+	local_irq_save(*flags);
+	__get_cpu_var(irq_radix_reader) = 1;
+	smp_mb();
+	if (likely(irq_radix_writer == 0))
+		return;
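+	/* slow path: a writer is active, so drop the reader flag and
+	 * synchronize with it on the big lock before flagging again */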
+	__get_cpu_var(irq_radix_reader) = 0;
+	smp_wmb();
+	spin_lock(&irq_big_lock);
+	__get_cpu_var(irq_radix_reader) = 1;
+	spin_unlock(&irq_big_lock);
+}
+
+static void irq_radix_rdunlock(unsigned long flags)
+{
+	__get_cpu_var(irq_radix_reader) = 0;
+	local_irq_restore(flags);
+}
+
 unsigned int irq_create_mapping(struct irq_host *host,
 				irq_hw_number_t hwirq)
 {
@@ -605,13 +658,9 @@
 		/* Check if radix tree allocated yet */
 		if (host->revmap_data.tree.gfp_mask == 0)
 			break;
-		/* XXX radix tree not safe ! remove lock whem it becomes safe
-		 * and use some RCU sync to make sure everything is ok before we
-		 * can re-use that map entry
-		 */
-		spin_lock_irqsave(&irq_big_lock, flags);
+		irq_radix_wrlock(&flags);
 		radix_tree_delete(&host->revmap_data.tree, hwirq);
-		spin_unlock_irqrestore(&irq_big_lock, flags);
+		irq_radix_wrunlock(flags);
 		break;
 	}
 
@@ -678,25 +727,24 @@
 	if (tree->gfp_mask == 0)
 		return irq_find_mapping(host, hwirq);
 
-	/* XXX Current radix trees are NOT SMP safe !!! Remove that lock
-	 * when that is fixed (when Nick's patch gets in
-	 */
-	spin_lock_irqsave(&irq_big_lock, flags);
-
 	/* Now try to resolve */
+	irq_radix_rdlock(&flags);
 	ptr = radix_tree_lookup(tree, hwirq);
+	irq_radix_rdunlock(flags);
+
 	/* Found it, return */
 	if (ptr) {
 		virq = ptr - irq_map;
-		goto bail;
+		return virq;
 	}
 
 	/* If not there, try to insert it */
 	virq = irq_find_mapping(host, hwirq);
-	if (virq != NO_IRQ)
+	if (virq != NO_IRQ) {
+		irq_radix_wrlock(&flags);
 		radix_tree_insert(tree, hwirq, &irq_map[virq]);
- bail:
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+		irq_radix_wrunlock(flags);
+	}
 	return virq;
 }
 
@@ -807,12 +855,12 @@
 	struct irq_host *h;
 	unsigned long flags;
 
-	spin_lock_irqsave(&irq_big_lock, flags);
+	irq_radix_wrlock(&flags);
 	list_for_each_entry(h, &irq_hosts, link) {
 		if (h->revmap_type == IRQ_HOST_MAP_TREE)
 			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
 	}
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	irq_radix_wrunlock(flags);
 
 	return 0;
 }
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index d51be7c..c1b1e14 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -1254,6 +1254,9 @@
 
 	DBG("Try to map irq for %s...\n", pci_name(pci_dev));
 
+#ifdef DEBUG
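+	/* poison oirq so any field the parsing code leaves unset shows
+	 * up clearly in the DBG output below */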
+	memset(&oirq, 0xff, sizeof(oirq));
+#endif
 	/* Try to get a mapping from the device-tree */
 	if (of_irq_map_pci(pci_dev, &oirq)) {
 		u8 line, pin;
@@ -1279,8 +1282,9 @@
 		if (virq != NO_IRQ)
 			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
 	} else {
-		DBG(" -> got one, spec %d cells (0x%08x...) on %s\n",
-		    oirq.size, oirq.specifier[0], oirq.controller->full_name);
+		DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
+		    oirq.size, oirq.specifier[0], oirq.specifier[1],
+		    oirq.controller->full_name);
 
 		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
 					     oirq.size);
@@ -1289,6 +1293,9 @@
 		DBG(" -> failed to map !\n");
 		return -1;
 	}
+
+	DBG(" -> mapped to linux irq %d\n", virq);
+
 	pci_dev->irq = virq;
 	pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, virq);
 
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 90972ef..b9176163 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -646,13 +646,13 @@
 	5 - 1,				/* 5 option vectors */
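+	/* each length byte below encodes the number of bytes following
+	 * it in that vector, minus one */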
 
 	/* option vector 1: processor architectures supported */
-	3 - 1,				/* length */
+	3 - 2,				/* length */
 	0,				/* don't ignore, don't halt */
 	OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
 	OV1_PPC_2_04 | OV1_PPC_2_05,
 
 	/* option vector 2: Open Firmware options supported */
-	34 - 1,				/* length */
+	34 - 2,				/* length */
 	OV2_REAL_MODE,
 	0, 0,
 	W(0xffffffff),			/* real_base */
@@ -666,16 +666,16 @@
 	48,				/* max log_2(hash table size) */
 
 	/* option vector 3: processor options supported */
-	3 - 1,				/* length */
+	3 - 2,				/* length */
 	0,				/* don't ignore, don't halt */
 	OV3_FP | OV3_VMX,
 
 	/* option vector 4: IBM PAPR implementation */
-	2 - 1,				/* length */
+	2 - 2,				/* length */
 	0,				/* don't halt */
 
 	/* option vector 5: PAPR/OF options */
-	3 - 1,				/* length */
+	3 - 2,				/* length */
 	0,				/* don't ignore, don't halt */
 	OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES,
 };
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 6032032..603dff3 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -642,7 +642,7 @@
 
 }
 
-int of_irq_map_raw(struct device_node *parent, const u32 *intspec,
+int of_irq_map_raw(struct device_node *parent, const u32 *intspec, u32 ointsize,
 		const u32 *addr, struct of_irq *out_irq)
 {
 	struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
@@ -650,6 +650,9 @@
 	u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
 	int imaplen, match, i;
 
+	DBG("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n",
+	    parent->full_name, intspec[0], intspec[1], ointsize);
+
 	ipar = of_node_get(parent);
 
 	/* First get the #interrupt-cells property of the current cursor
@@ -673,6 +676,9 @@
 
 	DBG("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize);
 
+	if (ointsize != intsize)
+		return -EINVAL;
+
 	/* Look for this #address-cells. We have to implement the old linux
 	 * trick of looking for the parent here as some device-trees rely on it
 	 */
@@ -879,12 +885,15 @@
 	}
 	intsize = *tmp;
 
+	DBG(" intsize=%d intlen=%d\n", intsize, intlen);
+
 	/* Check index */
 	if ((index + 1) * intsize > intlen)
 		return -EINVAL;
 
 	/* Get new specifier and map it */
-	res = of_irq_map_raw(p, intspec + index * intsize, addr, out_irq);
+	res = of_irq_map_raw(p, intspec + index * intsize, intsize,
+			     addr, out_irq);
 	of_node_put(p);
 	return res;
 }
@@ -969,7 +978,7 @@
 	laddr[0] = (pdev->bus->number << 16)
 		| (pdev->devfn << 8);
 	laddr[1]  = laddr[2] = 0;
-	return of_irq_map_raw(ppnode, &lspec, laddr, out_irq);
+	return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
 }
 EXPORT_SYMBOL_GPL(of_irq_map_pci);
 #endif /* CONFIG_PCI */
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index f19e2e0..de59c6c 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -45,8 +45,9 @@
 {
 	int cmd;
 	u64 tb;
+	unsigned long flags;
 
-	local_irq_disable();
+	local_irq_save(flags);
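+	/* interrupts stay off across the whole synchronisation; the
+	 * caller's interrupt state is restored at the end */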
 	while (!running)
 		barrier();
 	rmb();
@@ -70,7 +71,7 @@
 			set_tb(tb >> 32, tb & 0xfffffffful);
 		enter_contest(tbsync->mark, -1);
 	}
-	local_irq_enable();
+	local_irq_restore(flags);
 }
 
 static int __devinit start_contest(int cmd, long offset, int num)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 272cb82..b9a2061 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -125,15 +125,8 @@
 unsigned long ppc_proc_freq;
 unsigned long ppc_tb_freq;
 
-u64 tb_last_jiffy __cacheline_aligned_in_smp;
-unsigned long tb_last_stamp;
-
-/*
- * Note that on ppc32 this only stores the bottom 32 bits of
- * the timebase value, but that's enough to tell when a jiffy
- * has passed.
- */
-DEFINE_PER_CPU(unsigned long, last_jiffy);
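+/* 64-bit timebase value at the last jiffy update, globally and per-cpu */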
+static u64 tb_last_jiffy __cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(u64, last_jiffy);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 /*
@@ -458,7 +451,7 @@
 		do {
 			seq = read_seqbegin_irqsave(&xtime_lock, flags);
 			sec = xtime.tv_sec;
-			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
+			nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
 		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 		usec = nsec / 1000;
 		while (usec >= 1000000) {
@@ -700,7 +693,6 @@
 		tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
 		if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
 			tb_last_jiffy = tb_next_jiffy;
-			tb_last_stamp = per_cpu(last_jiffy, cpu);
 			do_timer(regs);
 			timer_recalc_offset(tb_last_jiffy);
 			timer_check_rtc();
@@ -749,7 +741,7 @@
 	int i;
 	unsigned long half = tb_ticks_per_jiffy / 2;
 	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
-	unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
+	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
 
 	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
 	previous_tb -= tb_ticks_per_jiffy;
@@ -830,7 +822,7 @@
 	 * and therefore the (jiffies - wall_jiffies) computation
 	 * has been removed.
 	 */
-	tb_delta = tb_ticks_since(tb_last_stamp);
+	tb_delta = tb_ticks_since(tb_last_jiffy);
 	tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
 	new_nsec -= SCALE_XSEC(tb_delta, 1000000000);
 
@@ -950,8 +942,7 @@
 	if (__USE_RTC()) {
 		/* 601 processor: dec counts down by 128 every 128ns */
 		ppc_tb_freq = 1000000000;
-		tb_last_stamp = get_rtcl();
-		tb_last_jiffy = tb_last_stamp;
+		tb_last_jiffy = get_rtcl();
 	} else {
 		/* Normal PowerPC with timebase register */
 		ppc_md.calibrate_decr();
@@ -959,7 +950,7 @@
 		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
 		printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
 		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
-		tb_last_stamp = tb_last_jiffy = get_tb();
+		tb_last_jiffy = get_tb();
 	}
 
 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
@@ -1036,7 +1027,7 @@
 	do_gtod.varp = &do_gtod.vars[0];
 	do_gtod.var_idx = 0;
 	do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
-	__get_cpu_var(last_jiffy) = tb_last_stamp;
+	__get_cpu_var(last_jiffy) = tb_last_jiffy;
 	do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
 	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
 	do_gtod.varp->tb_to_xs = tb_to_xs;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4d0b4e7..9b352bd 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -148,7 +148,7 @@
 		panic("Fatal exception in interrupt");
 
 	if (panic_on_oops)
-		panic("Fatal exception: panic_on_oops");
+		panic("Fatal exception");
 
 	do_exit(err);