Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
new file mode 100644
index 0000000..6c7f4d9
--- /dev/null
+++ b/arch/ia64/sn/kernel/Makefile
@@ -0,0 +1,12 @@
+# arch/ia64/sn/kernel/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc.  All Rights Reserved.
+#
+
+obj-y				+= setup.o bte.o bte_error.o irq.o mca.o idle.o \
+				   huberror.o io_init.o iomv.o klconflib.o sn2/
+obj-$(CONFIG_IA64_GENERIC)      += machvec.o
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
new file mode 100644
index 0000000..ce0bc40
--- /dev/null
+++ b/arch/ia64/sn/kernel/bte.c
@@ -0,0 +1,453 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/shubio.h>
+#include <asm/nodedata.h>
+#include <asm/delay.h>
+
+#include <linux/bootmem.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+
+#include <asm/sn/bte.h>
+
+#ifndef L1_CACHE_MASK
+#define L1_CACHE_MASK (L1_CACHE_BYTES - 1)
+#endif
+
+/* two interfaces on two btes */
+#define MAX_INTERFACES_TO_TRY		4
+
+static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface)
+{
+	nodepda_t *tmp_nodepda;
+
+	tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid));
+	return &tmp_nodepda->bte_if[interface];
+
+}
+
+/************************************************************************
+ * Block Transfer Engine copy related functions.
+ *
+ ***********************************************************************/
+
+/*
+ * bte_copy(src, dest, len, mode, notification)
+ *
+ * Use the block transfer engine to move kernel memory from src to dest
+ * using the assigned mode.
+ *
+ * Parameters:
+ *   src - physical address of the transfer source.
+ *   dest - physical address of the transfer destination.
+ *   len - number of bytes to transfer from source to dest.
+ *   mode - hardware defined.  See reference information
+ *          for IBCT0/1 in the SHUB Programmers Reference
+ *   notification - kernel virtual address of the notification cache
+ *                  line.  If NULL, the default is used and
+ *                  the bte_copy is synchronous.
+ *
+ * NOTE:  This function requires src, dest, and len to
+ * be cacheline aligned.
+ */
+bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
+{
+	u64 transfer_size;
+	u64 transfer_stat;
+	struct bteinfo_s *bte;
+	bte_result_t bte_status;
+	unsigned long irq_flags;
+	unsigned long itc_end = 0;
+	struct bteinfo_s *btes_to_try[MAX_INTERFACES_TO_TRY];
+	int bte_if_index;
+	int bte_pri, bte_sec;
+
+	BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n",
+		    src, dest, len, mode, notification));
+
+	if (len == 0) {
+		return BTE_SUCCESS;
+	}
+
+	BUG_ON((len & L1_CACHE_MASK) ||
+		 (src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK));
+	BUG_ON(!(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT)));
+
+	/* CPU 0 (per node) tries bte0 first, CPU 1 tries bte1 first */
+	if (cpuid_to_subnode(smp_processor_id()) == 0) {
+		bte_pri = 0;
+		bte_sec = 1;
+	} else {
+		bte_pri = 1;
+		bte_sec = 0;
+	}
+
+	if (mode & BTE_USE_DEST) {
+		/* try remote then local */
+		btes_to_try[0] = bte_if_on_node(NASID_GET(dest), bte_pri);
+		btes_to_try[1] = bte_if_on_node(NASID_GET(dest), bte_sec);
+		if (mode & BTE_USE_ANY) {
+			btes_to_try[2] = bte_if_on_node(get_nasid(), bte_pri);
+			btes_to_try[3] = bte_if_on_node(get_nasid(), bte_sec);
+		} else {
+			btes_to_try[2] = NULL;
+			btes_to_try[3] = NULL;
+		}
+	} else {
+		/* try local then remote */
+		btes_to_try[0] = bte_if_on_node(get_nasid(), bte_pri);
+		btes_to_try[1] = bte_if_on_node(get_nasid(), bte_sec);
+		if (mode & BTE_USE_ANY) {
+			btes_to_try[2] = bte_if_on_node(NASID_GET(dest), bte_pri);
+			btes_to_try[3] = bte_if_on_node(NASID_GET(dest), bte_sec);
+		} else {
+			btes_to_try[2] = NULL;
+			btes_to_try[3] = NULL;
+		}
+	}
+
+retry_bteop:
+	do {
+		local_irq_save(irq_flags);
+
+		bte_if_index = 0;
+
+		/* Attempt to lock one of the BTE interfaces. */
+		while (bte_if_index < MAX_INTERFACES_TO_TRY) {
+			bte = btes_to_try[bte_if_index++];
+
+			if (bte == NULL) {
+				continue;
+			}
+
+			if (spin_trylock(&bte->spinlock)) {
+				if (!(*bte->most_rcnt_na & BTE_WORD_AVAILABLE) ||
+				    (BTE_LNSTAT_LOAD(bte) & BTE_ACTIVE)) {
+					/* Got the lock but BTE still busy */
+					spin_unlock(&bte->spinlock);
+				} else {
+					/* we got the lock and it's not busy */
+					break;
+				}
+			}
+			bte = NULL;
+		}
+
+		if (bte != NULL) {
+			break;
+		}
+
+		local_irq_restore(irq_flags);
+
+		if (!(mode & BTE_WACQUIRE)) {
+			return BTEFAIL_NOTAVAIL;
+		}
+	} while (1);
+
+	if (notification == NULL) {
+		/* User does not want to be notified. */
+		bte->most_rcnt_na = &bte->notify;
+	} else {
+		bte->most_rcnt_na = notification;
+	}
+
+	/* Calculate the number of cache lines to transfer. */
+	transfer_size = ((len >> L1_CACHE_SHIFT) & BTE_LEN_MASK);
+
+	/* Initialize the notification to a known value. */
+	*bte->most_rcnt_na = BTE_WORD_BUSY;
+
+	/* Set the status reg busy bit and transfer length */
+	BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size));
+	BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size);
+
+	/* Set the source and destination registers */
+	BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
+	BTE_SRC_STORE(bte, TO_PHYS(src));
+	BTE_PRINTKV(("IBDA = 0x%lx)\n", (TO_PHYS(dest))));
+	BTE_DEST_STORE(bte, TO_PHYS(dest));
+
+	/* Set the notification register */
+	BTE_PRINTKV(("IBNA = 0x%lx)\n",
+		     TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))));
+	BTE_NOTIF_STORE(bte,
+			TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)));
+
+	/* Initiate the transfer */
+	BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
+	BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode));
+
+	itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec);
+
+	spin_unlock_irqrestore(&bte->spinlock, irq_flags);
+
+	if (notification != NULL) {
+		return BTE_SUCCESS;
+	}
+
+	while ((transfer_stat = *bte->most_rcnt_na) == BTE_WORD_BUSY) {
+		if (ia64_get_itc() > itc_end) {
+			BTE_PRINTK(("BTE timeout nasid 0x%x bte%d IBLS = 0x%lx na 0x%lx\n",
+				NASID_GET(bte->bte_base_addr), bte->bte_num,
+				BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na) );
+			bte->bte_error_count++;
+			bte->bh_error = IBLS_ERROR;
+			bte_error_handler((unsigned long)NODEPDA(bte->bte_cnode));
+			*bte->most_rcnt_na = BTE_WORD_AVAILABLE;
+			goto retry_bteop;
+		}
+	}
+
+	BTE_PRINTKV((" Delay Done.  IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
+		     BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
+
+	if (transfer_stat & IBLS_ERROR) {
+		bte_status = transfer_stat & ~IBLS_ERROR;
+	} else {
+		bte_status = BTE_SUCCESS;
+	}
+	*bte->most_rcnt_na = BTE_WORD_AVAILABLE;
+
+	BTE_PRINTK(("Returning status is 0x%lx and most_rcnt_na is 0x%lx\n",
+		    BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
+
+	return bte_status;
+}
+
+EXPORT_SYMBOL(bte_copy);
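+
+/*
+ * A minimal usage sketch (illustrative only, kept under #if 0 so it is
+ * never built): a synchronous BTE copy.  The src_phys/dest_phys/nbytes
+ * values are hypothetical and, as noted above, must be cacheline aligned.
+ */
+#if 0
+static bte_result_t bte_copy_example(u64 src_phys, u64 dest_phys, u64 nbytes)
+{
+	/* notification == NULL makes the transfer synchronous;
+	 * BTE_WACQUIRE retries until a BTE interface is acquired. */
+	return bte_copy(src_phys, dest_phys, nbytes, BTE_WACQUIRE, NULL);
+}
+#endif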
+
+/*
+ * bte_unaligned_copy(src, dest, len, mode)
+ *
+ * Use the block transfer engine to move kernel
+ * memory from src to dest using the assigned mode.
+ *
+ * Parameters:
+ *   src - physical address of the transfer source.
+ *   dest - physical address of the transfer destination.
+ *   len - number of bytes to transfer from source to dest.
+ *   mode - hardware defined.  See reference information
+ *          for IBCT0/1 in the SGI documentation.
+ *
+ * NOTE: If the source, dest, and len are all cache line aligned,
+ * then it would be _FAR_ preferable to use bte_copy instead.
+ */
+bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
+{
+	int destFirstCacheOffset;
+	u64 headBteSource;
+	u64 headBteLen;
+	u64 headBcopySrcOffset;
+	u64 headBcopyDest;
+	u64 headBcopyLen;
+	u64 footBteSource;
+	u64 footBteLen;
+	u64 footBcopyDest;
+	u64 footBcopyLen;
+	bte_result_t rv;
+	char *bteBlock, *bteBlock_unaligned;
+
+	if (len == 0) {
+		return BTE_SUCCESS;
+	}
+
+	/* temporary buffer used during unaligned transfers */
+	bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES,
+				     GFP_KERNEL | GFP_DMA);
+	if (bteBlock_unaligned == NULL) {
+		return BTEFAIL_NOTAVAIL;
+	}
+	bteBlock = (char *)L1_CACHE_ALIGN((u64) bteBlock_unaligned);
+
+	headBcopySrcOffset = src & L1_CACHE_MASK;
+	destFirstCacheOffset = dest & L1_CACHE_MASK;
+
+	/*
+	 * At this point, the transfer is broken into
+	 * (up to) three sections.  The first section is
+	 * from the start address to the first physical
+	 * cache line, the second is from the first physical
+	 * cache line to the last complete cache line,
+	 * and the third is from the last cache line to the
+	 * end of the buffer.  The first and third sections
+	 * are handled by bte copying into a temporary buffer
+	 * and then bcopy'ing the necessary section into the
+	 * final location.  The middle section is handled with
+	 * a standard bte copy.
+	 *
+	 * One nasty exception to the above rule is when the
+	 * source and destination are not symmetrically
+	 * mis-aligned.  If the source offset from the first
+	 * cache line is different from the destination offset,
+	 * we make the first section be the entire transfer
+	 * and then bcopy the entire block into place.
+	 */
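+	/*
+	 * Illustrative numbers (assuming 128-byte L1 cache lines): a
+	 * transfer of len 0x250 with src and dest both offset 0x10 into
+	 * their cache lines splits into a 0x70-byte head (bte into the
+	 * bounce buffer, then memcpy), a 0x180-byte cache-aligned middle
+	 * (direct bte_copy), and a 0x60-byte foot (bte into the bounce
+	 * buffer, then memcpy).
+	 */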
+	if (headBcopySrcOffset == destFirstCacheOffset) {
+
+		/*
+		 * Both the source and destination are the same
+		 * distance from a cache line boundary so we can
+		 * use the bte to transfer the bulk of the
+		 * data.
+		 */
+		headBteSource = src & ~L1_CACHE_MASK;
+		headBcopyDest = dest;
+		if (headBcopySrcOffset) {
+			headBcopyLen =
+			    (len >
+			     (L1_CACHE_BYTES -
+			      headBcopySrcOffset) ? L1_CACHE_BYTES
+			     - headBcopySrcOffset : len);
+			headBteLen = L1_CACHE_BYTES;
+		} else {
+			headBcopyLen = 0;
+			headBteLen = 0;
+		}
+
+		if (len > headBcopyLen) {
+			footBcopyLen = (len - headBcopyLen) & L1_CACHE_MASK;
+			footBteLen = L1_CACHE_BYTES;
+
+			footBteSource = src + len - footBcopyLen;
+			footBcopyDest = dest + len - footBcopyLen;
+
+			if (footBcopyDest == (headBcopyDest + headBcopyLen)) {
+				/*
+				 * We have two contiguous bcopy
+				 * blocks.  Merge them.
+				 */
+				headBcopyLen += footBcopyLen;
+				headBteLen += footBteLen;
+			} else if (footBcopyLen > 0) {
+				rv = bte_copy(footBteSource,
+					      ia64_tpa((unsigned long)bteBlock),
+					      footBteLen, mode, NULL);
+				if (rv != BTE_SUCCESS) {
+					kfree(bteBlock_unaligned);
+					return rv;
+				}
+
+				memcpy(__va(footBcopyDest),
+				       (char *)bteBlock, footBcopyLen);
+			}
+		} else {
+			footBcopyLen = 0;
+			footBteLen = 0;
+		}
+
+		if (len > (headBcopyLen + footBcopyLen)) {
+			/* now transfer the middle. */
+			rv = bte_copy((src + headBcopyLen),
+				      (dest +
+				       headBcopyLen),
+				      (len - headBcopyLen -
+				       footBcopyLen), mode, NULL);
+			if (rv != BTE_SUCCESS) {
+				kfree(bteBlock_unaligned);
+				return rv;
+			}
+
+		}
+	} else {
+
+		/*
+		 * The transfer is not symmetric, so we will
+		 * allocate a buffer large enough for all the
+		 * data, bte_copy into that buffer and then
+		 * bcopy to the destination.
+		 */
+
+		/* Add the leader from source */
+		headBteLen = len + (src & L1_CACHE_MASK);
+		/* Add the trailing bytes from footer. */
+		headBteLen += L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
+		headBteSource = src & ~L1_CACHE_MASK;
+		headBcopySrcOffset = src & L1_CACHE_MASK;
+		headBcopyDest = dest;
+		headBcopyLen = len;
+	}
+
+	if (headBcopyLen > 0) {
+		rv = bte_copy(headBteSource,
+			      ia64_tpa((unsigned long)bteBlock), headBteLen,
+			      mode, NULL);
+		if (rv != BTE_SUCCESS) {
+			kfree(bteBlock_unaligned);
+			return rv;
+		}
+
+		memcpy(__va(headBcopyDest), ((char *)bteBlock +
+					     headBcopySrcOffset), headBcopyLen);
+	}
+	kfree(bteBlock_unaligned);
+	return BTE_SUCCESS;
+}
+
+EXPORT_SYMBOL(bte_unaligned_copy);
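+
+/*
+ * A minimal usage sketch (illustrative only, kept under #if 0 so it is
+ * never built).  Unlike bte_copy, no alignment is required here, but the
+ * GFP_KERNEL bounce-buffer allocation means this can sleep and must not
+ * be called from interrupt context.
+ */
+#if 0
+static bte_result_t bte_unaligned_copy_example(u64 src, u64 dest, u64 nbytes)
+{
+	/* src, dest and nbytes may have any byte alignment */
+	return bte_unaligned_copy(src, dest, nbytes, BTE_WACQUIRE);
+}
+#endif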
+
+/************************************************************************
+ * Block Transfer Engine initialization functions.
+ *
+ ***********************************************************************/
+
+/*
+ * bte_init_node(nodepda, cnode)
+ *
+ * Initialize the nodepda structure with BTE base addresses and
+ * spinlocks.
+ */
+void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
+{
+	int i;
+
+	/*
+	 * Indicate that all the block transfer engines on this node
+	 * are available.
+	 */
+
+	/*
+	 * Allocate one bte_recover_t structure per node.  It holds
+	 * the recovery lock for the node.  All the bte interface structures
+	 * will point at this one bte_recover structure to get the lock.
+	 */
+	spin_lock_init(&mynodepda->bte_recovery_lock);
+	init_timer(&mynodepda->bte_recovery_timer);
+	mynodepda->bte_recovery_timer.function = bte_error_handler;
+	mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda;
+
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		/* Which link status register should we use? */
+		unsigned long link_status = (i == 0 ? IIO_IBLS0 : IIO_IBLS1);
+		mynodepda->bte_if[i].bte_base_addr = (u64 *)
+		    REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status);
+
+		/*
+		 * Initialize the notification and spinlock
+		 * so the first transfer can occur.
+		 */
+		mynodepda->bte_if[i].most_rcnt_na =
+		    &(mynodepda->bte_if[i].notify);
+		mynodepda->bte_if[i].notify = BTE_WORD_AVAILABLE;
+		spin_lock_init(&mynodepda->bte_if[i].spinlock);
+
+		mynodepda->bte_if[i].bte_cnode = cnode;
+		mynodepda->bte_if[i].bte_error_count = 0;
+		mynodepda->bte_if[i].bte_num = i;
+		mynodepda->bte_if[i].cleanup_active = 0;
+		mynodepda->bte_if[i].bh_error = 0;
+	}
+
+}
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c
new file mode 100644
index 0000000..fd10431
--- /dev/null
+++ b/arch/ia64/sn/kernel/bte_error.c
@@ -0,0 +1,198 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#include <linux/types.h>
+#include <asm/sn/sn_sal.h>
+#include "ioerror.h"
+#include <asm/sn/addrs.h>
+#include <asm/sn/shubio.h>
+#include <asm/sn/geo.h>
+#include "xtalk/xwidgetdev.h"
+#include "xtalk/hubdev.h"
+#include <asm/sn/bte.h>
+#include <asm/param.h>
+
+/*
+ * Bte error handling is done in two parts.  The first captures
+ * any crb related errors.  Since there can be multiple crbs per
+ * interface and multiple interfaces active, we need to wait until
+ * all active crbs are completed.  This is the first job of the
+ * second part error handler.  When all bte related CRBs are cleanly
+ * completed, it resets the interfaces and gets them ready for new
+ * transfers to be queued.
+ */
+
+void bte_error_handler(unsigned long);
+
+/*
+ * Wait until all BTE related CRBs are completed
+ * and then reset the interfaces.
+ */
+void bte_error_handler(unsigned long _nodepda)
+{
+	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
+	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
+	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
+	nasid_t nasid;
+	int i;
+	int valid_crbs;
+	unsigned long irq_flags;
+	volatile u64 *notify;
+	bte_result_t bh_error;
+	ii_imem_u_t imem;	/* II IMEM Register */
+	ii_icrb0_d_u_t icrbd;	/* II CRB Register D */
+	ii_ibcr_u_t ibcr;
+	ii_icmr_u_t icmr;
+	ii_ieclr_u_t ieclr;
+
+	BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
+		    smp_processor_id()));
+
+	spin_lock_irqsave(recovery_lock, irq_flags);
+
+	if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
+	    (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
+		BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
+			    smp_processor_id()));
+		spin_unlock_irqrestore(recovery_lock, irq_flags);
+		return;
+	}
+	/*
+	 * Lock all interfaces on this node to prevent new transfers
+	 * from being queued.
+	 */
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		if (err_nodepda->bte_if[i].cleanup_active) {
+			continue;
+		}
+		spin_lock(&err_nodepda->bte_if[i].spinlock);
+		BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
+			    smp_processor_id(), i));
+		err_nodepda->bte_if[i].cleanup_active = 1;
+	}
+
+	/* Determine information about our hub */
+	nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
+
+	/*
+	 * A BTE transfer can use multiple CRBs.  We need to make sure
+	 * that all the BTE CRBs are complete (or timed out) before
+	 * attempting to clean up the error.  Resetting the BTE while
+	 * there are still BTE CRBs active will hang the BTE.
+	 * We should look at all the CRBs to see if they are allocated
+	 * to the BTE and see if they are still active.  When none
+	 * are active, we can continue with the cleanup.
+	 *
+	 * We also want to make sure that the local NI port is up.
+	 * When a router resets the NI port can go down, while it
+	 * goes through the LLP handshake, but then comes back up.
+	 */
+	icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR);
+	if (icmr.ii_icmr_fld_s.i_crb_mark != 0) {
+		/*
+		 * There are errors which still need to be cleaned up by
+		 * hubiio_crb_error_handler
+		 */
+		mod_timer(recovery_timer, jiffies + (HZ * 5));
+		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
+			    smp_processor_id()));
+		spin_unlock_irqrestore(recovery_lock, irq_flags);
+		return;
+	}
+	if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
+
+		valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld;
+
+		for (i = 0; i < IIO_NUM_CRBS; i++) {
+			if (!((1 << i) & valid_crbs)) {
+				/* This crb was not marked as valid, ignore */
+				continue;
+			}
+			icrbd.ii_icrb0_d_regval =
+			    REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
+			if (icrbd.d_bteop) {
+				mod_timer(recovery_timer, jiffies + (HZ * 5));
+				BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
+					    err_nodepda, smp_processor_id(),
+					    i));
+				spin_unlock_irqrestore(recovery_lock,
+						       irq_flags);
+				return;
+			}
+		}
+	}
+
+	BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));
+	/* Reenable both bte interfaces */
+	imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
+	imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
+	REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
+
+	/* Clear BTE0/1 error bits */
+	ieclr.ii_ieclr_regval = 0;
+	if (err_nodepda->bte_if[0].bh_error != BTE_SUCCESS)
+		ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1;
+	if (err_nodepda->bte_if[1].bh_error != BTE_SUCCESS)
+		ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1;
+	REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval);
+
+	/* Reinitialize both BTE state machines. */
+	ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
+	ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
+	REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
+
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		bh_error = err_nodepda->bte_if[i].bh_error;
+		if (bh_error != BTE_SUCCESS) {
+			/* There is an error which needs to be notified */
+			notify = err_nodepda->bte_if[i].most_rcnt_na;
+			BTE_PRINTK(("cnode %d bte %d error=0x%lx\n",
+				    err_nodepda->bte_if[i].bte_cnode,
+				    err_nodepda->bte_if[i].bte_num,
+				    IBLS_ERROR | (u64) bh_error));
+			*notify = IBLS_ERROR | bh_error;
+			err_nodepda->bte_if[i].bh_error = BTE_SUCCESS;
+		}
+
+		err_nodepda->bte_if[i].cleanup_active = 0;
+		BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda,
+			    smp_processor_id(), i));
+		spin_unlock(&err_nodepda->bte_if[i].spinlock);
+	}
+
+	del_timer(recovery_timer);
+
+	spin_unlock_irqrestore(recovery_lock, irq_flags);
+}
+
+/*
+ * First part error handler.  This is called whenever any error CRB interrupt
+ * is generated by the II.
+ */
+void
+bte_crb_error_handler(cnodeid_t cnode, int btenum,
+                      int crbnum, ioerror_t * ioe, int bteop)
+{
+	struct bteinfo_s *bte;
+
+
+	bte = &(NODEPDA(cnode)->bte_if[btenum]);
+
+	/*
+	 * The caller has already figured out the error type; we save that
+	 * in the bte handle structure for the thread exercising the
+	 * interface to consume.
+	 */
+	bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
+	bte->bte_error_count++;
+
+	BTE_PRINTK(("Got an error on cnode %d bte %d: HW error type 0x%x\n",
+		bte->bte_cnode, bte->bte_num, ioe->ie_errortype));
+	bte_error_handler((unsigned long) NODEPDA(cnode));
+}
+
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
new file mode 100644
index 0000000..2bdf684
--- /dev/null
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -0,0 +1,201 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000,2002-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <asm/delay.h>
+#include <asm/sn/sn_sal.h>
+#include "ioerror.h"
+#include <asm/sn/addrs.h>
+#include <asm/sn/shubio.h>
+#include <asm/sn/geo.h>
+#include "xtalk/xwidgetdev.h"
+#include "xtalk/hubdev.h"
+#include <asm/sn/bte.h>
+
+void hubiio_crb_error_handler(struct hubdev_info *hubdev_info);
+extern void bte_crb_error_handler(cnodeid_t, int, int, ioerror_t *,
+				  int);
+static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
+{
+	struct hubdev_info *hubdev_info;
+	struct ia64_sal_retval ret_stuff;
+	nasid_t nasid;
+
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+	hubdev_info = (struct hubdev_info *)arg;
+	nasid = hubdev_info->hdi_nasid;
+	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
+			(u64) nasid, 0, 0, 0, 0, 0, 0);
+
+	if ((int)ret_stuff.v0)
+		panic("hubii_eint_handler(): Fatal TIO Error");
+
+	if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
+		(void)hubiio_crb_error_handler(hubdev_info);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Free the hub CRB "crbnum" which encountered an error.
+ * Assumption is, error handling was successfully done,
+ * and we now want to return the CRB back to Hub for normal usage.
+ *
+ * In order to free the CRB, all that's needed is to de-allocate it
+ *
+ * Assumption:
+ *      No other processor is mucking around with the hub control register.
+ *      So, upper layer has to single thread this.
+ */
+void hubiio_crb_free(struct hubdev_info *hubdev_info, int crbnum)
+{
+	ii_icrb0_b_u_t icrbb;
+
+	/*
+	 * The hardware does NOT clear the mark bit, so it must get cleared
+	 * here to be sure the error is not processed twice.
+	 */
+	icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(hubdev_info->hdi_nasid,
+					       IIO_ICRB_B(crbnum));
+	icrbb.b_mark = 0;
+	REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICRB_B(crbnum),
+		     icrbb.ii_icrb0_b_regval);
+	/*
+	 * Deallocate the register and wait till the hub indicates it's done.
+	 */
+	REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICDR, (IIO_ICDR_PND | crbnum));
+	while (REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICDR) & IIO_ICDR_PND)
+		udelay(1);
+
+}
+
+/*
+ * hubiio_crb_error_handler
+ *
+ *	This routine gets invoked when a hub gets an error 
+ *	interrupt. So, the routine is running in interrupt context
+ *	at error interrupt level.
+ * Action:
+ *	It's responsible for identifying ALL the CRBs that are marked
+ *	with error, and processing them.
+ *
+ *	For each CRB that is marked with error, map it to the
+ *	reason it caused the error, and invoke the appropriate error handler.
+ *
+ *	XXX Be aware of the information in the context register.
+ *
+ * NOTE:
+ *	Use REMOTE_HUB_* macro instead of LOCAL_HUB_* so that the interrupt
+ *	handler can be run on any node (not necessarily the node
+ *	corresponding to the hub that encountered the error).
+ */
+
+void hubiio_crb_error_handler(struct hubdev_info *hubdev_info)
+{
+	nasid_t nasid;
+	ii_icrb0_a_u_t icrba;	/* II CRB Register A */
+	ii_icrb0_b_u_t icrbb;	/* II CRB Register B */
+	ii_icrb0_c_u_t icrbc;	/* II CRB Register C */
+	ii_icrb0_d_u_t icrbd;	/* II CRB Register D */
+	ii_icrb0_e_u_t icrbe;	/* II CRB Register E */
+	int i;
+	int num_errors = 0;	/* Num of errors handled */
+	ioerror_t ioerror;
+
+	nasid = hubdev_info->hdi_nasid;
+
+	/*
+	 * XXX - Add locking for any recovery actions
+	 */
+	/*
+	 * Scan through all CRBs in the Hub, and handle the errors
+	 * in any of the CRBs marked.
+	 */
+	for (i = 0; i < IIO_NUM_CRBS; i++) {
+		/* Check this crb entry to see if it is in error. */
+		icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(nasid, IIO_ICRB_B(i));
+
+		if (icrbb.b_mark == 0) {
+			continue;
+		}
+
+		icrba.ii_icrb0_a_regval = REMOTE_HUB_L(nasid, IIO_ICRB_A(i));
+
+		IOERROR_INIT(&ioerror);
+
+		/* read other CRB error registers. */
+		icrbc.ii_icrb0_c_regval = REMOTE_HUB_L(nasid, IIO_ICRB_C(i));
+		icrbd.ii_icrb0_d_regval = REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
+		icrbe.ii_icrb0_e_regval = REMOTE_HUB_L(nasid, IIO_ICRB_E(i));
+
+		IOERROR_SETVALUE(&ioerror, errortype, icrbb.b_ecode);
+
+		/* Check if this error is due to BTE operation,
+		 * and handle it separately.
+		 */
+		if (icrbd.d_bteop ||
+		    ((icrbb.b_initiator == IIO_ICRB_INIT_BTE0 ||
+		      icrbb.b_initiator == IIO_ICRB_INIT_BTE1) &&
+		     (icrbb.b_imsgtype == IIO_ICRB_IMSGT_BTE ||
+		      icrbb.b_imsgtype == IIO_ICRB_IMSGT_SN1NET))) {
+
+			int bte_num;
+
+			if (icrbd.d_bteop)
+				bte_num = icrbc.c_btenum;
+			else	/* b_initiator bit 2 gives BTE number */
+				bte_num = (icrbb.b_initiator & 0x4) >> 2;
+
+			hubiio_crb_free(hubdev_info, i);
+
+			bte_crb_error_handler(nasid_to_cnodeid(nasid), bte_num,
+					      i, &ioerror, icrbd.d_bteop);
+			num_errors++;
+			continue;
+		}
+	}
+}
+
+/*
+ * Function	: hub_error_init
+ * Purpose	: initialize the error handling requirements for a given hub.
+ * Parameters	: cnode, the compact nodeid.
+ * Assumptions	: Called only once per hub, either by a local cpu or by a
+ *			remote cpu when this hub is headless (cpuless).
+ * Returns	: None
+ */
+void hub_error_init(struct hubdev_info *hubdev_info)
+{
+	if (request_irq(SGI_II_ERROR, (void *)hub_eint_handler, SA_SHIRQ,
+			"SN_hub_error", (void *)hubdev_info))
+		printk("hub_error_init: Failed to request_irq for 0x%p\n",
+		    hubdev_info);
+	return;
+}
+
+
+/*
+ * Function	: ice_error_init
+ * Purpose	: initialize the error handling requirements for a given tio.
+ * Parameters	: cnode, the compact nodeid.
+ * Assumptions	: Called only once per tio.
+ * Returns	: None
+ */
+void ice_error_init(struct hubdev_info *hubdev_info)
+{
+	if (request_irq(SGI_TIO_ERROR, (void *)hub_eint_handler, SA_SHIRQ,
+			"SN_TIO_error", (void *)hubdev_info))
+		printk("ice_error_init: request_irq() error hubdev_info 0x%p\n",
+		       hubdev_info);
+	return;
+}
+
diff --git a/arch/ia64/sn/kernel/idle.c b/arch/ia64/sn/kernel/idle.c
new file mode 100644
index 0000000..49d178f
--- /dev/null
+++ b/arch/ia64/sn/kernel/idle.c
@@ -0,0 +1,30 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2004 Silicon Graphics, Inc.  All rights reserved.
+ */
+
+#include <asm/sn/leds.h>
+
+void snidle(int state)
+{
+	if (state) {
+		if (pda->idle_flag == 0) {
+			/* 
+			 * Turn the activity LED off.
+			 */
+			set_led_bits(0, LED_CPU_ACTIVITY);
+		}
+
+		pda->idle_flag = 1;
+	} else {
+		/* 
+		 * Turn the activity LED on.
+		 */
+		set_led_bits(LED_CPU_ACTIVITY, LED_CPU_ACTIVITY);
+
+		pda->idle_flag = 0;
+	}
+}
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
new file mode 100644
index 0000000..0018808
--- /dev/null
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -0,0 +1,411 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/nodemask.h>
+#include <asm/sn/types.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/addrs.h>
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/pcibr_provider.h"
+#include "xtalk/xwidgetdev.h"
+#include <asm/sn/geo.h>
+#include "xtalk/hubdev.h"
+#include <asm/sn/io.h>
+#include <asm/sn/simulator.h>
+
+char master_baseio_wid;
+nasid_t master_nasid = INVALID_NASID;	/* Partition Master */
+
+struct slab_info {
+	struct hubdev_info hubdev;
+};
+
+struct brick {
+	moduleid_t id;		/* Module ID of this module        */
+	struct slab_info slab_info[MAX_SLABS + 1];
+};
+
+int sn_ioif_inited = 0;		/* SN I/O infrastructure initialized? */
+
+/*
+ * Retrieve the DMA Flush List given nasid.  This list is needed 
+ * to implement the WAR - Flush DMA data on PIO Reads.
+ */
+static inline uint64_t
+sal_get_widget_dmaflush_list(u64 nasid, u64 widget_num, u64 address)
+{
+
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	SAL_CALL_NOLOCK(ret_stuff,
+			(u64) SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
+			(u64) nasid, (u64) widget_num, (u64) address, 0, 0, 0,
+			0);
+	return ret_stuff.v0;
+
+}
+
+/*
+ * Retrieve the hub device info structure for the given nasid.
+ */
+static inline uint64_t sal_get_hubdev_info(u64 handle, u64 address)
+{
+
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	SAL_CALL_NOLOCK(ret_stuff,
+			(u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
+			(u64) handle, (u64) address, 0, 0, 0, 0, 0);
+	return ret_stuff.v0;
+}
+
+/*
+ * Retrieve the pci bus information given the bus number.
+ */
+static inline uint64_t sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
+{
+
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	SAL_CALL_NOLOCK(ret_stuff,
+			(u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
+			(u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
+	return ret_stuff.v0;
+}
+
+/*
+ * Retrieve the pci device information given the bus and device|function number.
+ */
+static inline uint64_t
+sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev, 
+			u64 sn_irq_info)
+{
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	SAL_CALL_NOLOCK(ret_stuff,
+			(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
+			(u64) segment, (u64) bus_number, (u64) devfn, 
+			(u64) pci_dev,
+			sn_irq_info, 0, 0);
+	return ret_stuff.v0;
+}
+
+/*
+ * sn_alloc_pci_sysdata() - This routine allocates a pci controller
+ *	structure, which the Linux PCI infrastructure expects as the
+ *	pci_dev and pci_bus sysdata.
+ */
+static inline struct pci_controller *sn_alloc_pci_sysdata(void)
+{
+	struct pci_controller *pci_sysdata;
+
+	pci_sysdata = kmalloc(sizeof(*pci_sysdata), GFP_KERNEL);
+	if (!pci_sysdata)
+		BUG();
+
+	memset(pci_sysdata, 0, sizeof(*pci_sysdata));
+	return pci_sysdata;
+}
+
+/*
+ * sn_fixup_ionodes() - This routine initializes the HUB data structure for
+ *	each node in the system.
+ */
+static void sn_fixup_ionodes(void)
+{
+
+	struct sn_flush_device_list *sn_flush_device_list;
+	struct hubdev_info *hubdev;
+	uint64_t status;
+	uint64_t nasid;
+	int i, widget;
+
+	for (i = 0; i < numionodes; i++) {
+		hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
+		nasid = cnodeid_to_nasid(i);
+		status = sal_get_hubdev_info(nasid, (uint64_t) __pa(hubdev));
+		if (status)
+			continue;
+
+		for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
+			hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
+
+		if (!hubdev->hdi_flush_nasid_list.widget_p)
+			continue;
+
+		hubdev->hdi_flush_nasid_list.widget_p =
+		    kmalloc((HUB_WIDGET_ID_MAX + 1) *
+			    sizeof(struct sn_flush_device_list *), GFP_KERNEL);
+
+		memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0,
+		       (HUB_WIDGET_ID_MAX + 1) *
+		       sizeof(struct sn_flush_device_list *));
+
+		for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
+			sn_flush_device_list = kmalloc(DEV_PER_WIDGET *
+						       sizeof(struct
+							      sn_flush_device_list),
+						       GFP_KERNEL);
+			memset(sn_flush_device_list, 0x0,
+			       DEV_PER_WIDGET *
+			       sizeof(struct sn_flush_device_list));
+
+			status =
+			    sal_get_widget_dmaflush_list(nasid, widget,
+							 (uint64_t)
+							 __pa
+							 (sn_flush_device_list));
+			if (status) {
+				kfree(sn_flush_device_list);
+				continue;
+			}
+
+			hubdev->hdi_flush_nasid_list.widget_p[widget] =
+			    sn_flush_device_list;
+		}
+
+		if (!(i & 1))
+			hub_error_init(hubdev);
+		else
+			ice_error_init(hubdev);
+	}
+
+}
+
+/*
+ * sn_pci_fixup_slot() - This routine sets up a slot's resources
+ * consistent with the Linux PCI abstraction layer.  Resources acquired
+ * from our PCI provider include PIO maps to BAR space and interrupt
+ * objects.
+ */
+static void sn_pci_fixup_slot(struct pci_dev *dev)
+{
+	int idx;
+	int segment = 0;
+	uint64_t size;
+	struct sn_irq_info *sn_irq_info;
+	struct pci_dev *host_pci_dev;
+	int status = 0;
+
+	dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL);
+	if (!SN_PCIDEV_INFO(dev))
+		BUG();		/* Cannot afford to run out of memory */
+	memset(SN_PCIDEV_INFO(dev), 0, sizeof(struct pcidev_info));
+
+	sn_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
+	if (!sn_irq_info)
+		BUG();		/* Cannot afford to run out of memory */
+	memset(sn_irq_info, 0, sizeof(struct sn_irq_info));
+
+	/* Call to retrieve pci device information needed by kernel. */
+	status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number, 
+				     dev->devfn,
+				     (u64) __pa(SN_PCIDEV_INFO(dev)),
+				     (u64) __pa(sn_irq_info));
+	if (status)
+		BUG();		/* Cannot get platform pci device information */
+
+	/* Copy over PIO Mapped Addresses */
+	for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
+		unsigned long start, end, addr;
+
+		if (!SN_PCIDEV_INFO(dev)->pdi_pio_mapped_addr[idx])
+			continue;
+
+		start = dev->resource[idx].start;
+		end = dev->resource[idx].end;
+		size = end - start;
+		addr = SN_PCIDEV_INFO(dev)->pdi_pio_mapped_addr[idx];
+		addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
+		dev->resource[idx].start = addr;
+		dev->resource[idx].end = addr + size;
+		if (dev->resource[idx].flags & IORESOURCE_IO)
+			dev->resource[idx].parent = &ioport_resource;
+		else
+			dev->resource[idx].parent = &iomem_resource;
+	}
+
+	/* set up host bus linkages */
+	host_pci_dev =
+	    pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32,
+			  SN_PCIDEV_INFO(dev)->
+			  pdi_slot_host_handle & 0xffffffff);
+	SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info =
+	    SN_PCIDEV_INFO(host_pci_dev);
+	SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev;
+	SN_PCIDEV_INFO(dev)->pdi_pcibus_info = SN_PCIBUS_BUSSOFT(dev->bus);
+
+	/* Only set up IRQ stuff if this device has a host bus context */
+	if (SN_PCIDEV_BUSSOFT(dev) && sn_irq_info->irq_irq) {
+		SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info;
+		dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq;
+		sn_irq_fixup(dev, sn_irq_info);
+	}
+}
+
+/*
+ * sn_pci_controller_fixup() - This routine sets up a bus's resources
+ * consistent with the Linux PCI abstraction layer.
+ */
+static void sn_pci_controller_fixup(int segment, int busnum)
+{
+	int status = 0;
+	int nasid, cnode;
+	struct pci_bus *bus;
+	struct pci_controller *controller;
+	struct pcibus_bussoft *prom_bussoft_ptr;
+	struct hubdev_info *hubdev_info;
+	void *provider_soft;
+
+	status =
+	    sal_get_pcibus_info((u64) segment, (u64) busnum,
+				(u64) ia64_tpa(&prom_bussoft_ptr));
+	if (status > 0) {
+		return;		/* bus # does not exist */
+	}
+
+	prom_bussoft_ptr = __va(prom_bussoft_ptr);
+	controller = sn_alloc_pci_sysdata();
+	/* allocation failure is BUG'd in sn_alloc_pci_sysdata, so controller is valid */
+
+	bus = pci_scan_bus(busnum, &pci_root_ops, controller);
+	if (bus == NULL) {
+		return;		/* error, or bus already scanned */
+	}
+
+	/*
+	 * Per-provider fixup.  Copies the contents from prom to local
+	 * area and links SN_PCIBUS_BUSSOFT().
+	 *
+	 * Note:  Provider is responsible for ensuring that prom_bussoft_ptr
+	 * represents an asic-type that it can handle.
+	 */
+
+	if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) {
+		return;		/* no further fixup necessary */
+	}
+
+	provider_soft = pcibr_bus_fixup(prom_bussoft_ptr);
+	if (provider_soft == NULL) {
+		return;		/* fixup failed or not applicable */
+	}
+
+	/*
+	 * Generic bus fixup goes here.  Don't reference prom_bussoft_ptr
+	 * after this point.
+	 */
+
+	bus->sysdata = controller;
+	PCI_CONTROLLER(bus)->platform_data = provider_soft;
+
+	nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
+	cnode = nasid_to_cnodeid(nasid);
+	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+	SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
+	    &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
+}
+
+/*
+ * Ugly hack to get PCI setup until we have a proper ACPI namespace.
+ */
+
+#define PCI_BUSES_TO_SCAN 256
+
+static int __init sn_pci_init(void)
+{
+	int i = 0;
+	struct pci_dev *pci_dev = NULL;
+	extern void sn_init_cpei_timer(void);
+#ifdef CONFIG_PROC_FS
+	extern void register_sn_procfs(void);
+#endif
+
+	if (!ia64_platform_is("sn2") || IS_RUNNING_ON_SIMULATOR())
+		return 0;
+
+	/*
+	 * This is needed to avoid bounce limit checks in the blk layer
+	 */
+	ia64_max_iommu_merge_mask = ~PAGE_MASK;
+	sn_fixup_ionodes();
+	sn_irq = kmalloc(sizeof(struct sn_irq_info *) * NR_IRQS, GFP_KERNEL);
+	if (!sn_irq)
+		BUG();		/* Cannot afford to run out of memory. */
+	memset(sn_irq, 0, sizeof(struct sn_irq_info *) * NR_IRQS);
+
+	sn_init_cpei_timer();
+
+#ifdef CONFIG_PROC_FS
+	register_sn_procfs();
+#endif
+
+	for (i = 0; i < PCI_BUSES_TO_SCAN; i++) {
+		sn_pci_controller_fixup(0, i);
+	}
+
+	/*
+	 * Generic Linux PCI Layer has created the pci_bus and pci_dev 
+	 * structures - time for us to add our SN platform specific
+	 * information.
+	 */
+
+	while ((pci_dev =
+		pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) {
+		sn_pci_fixup_slot(pci_dev);
+	}
+
+	sn_ioif_inited = 1;	/* sn I/O infrastructure now initialized */
+
+	return 0;
+}
+
+/*
+ * hubdev_init_node() - Creates the HUB data structure and links it to its
+ *	own NODE specific data area.
+ */
+void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
+{
+
+	struct hubdev_info *hubdev_info;
+
+	if (node >= num_online_nodes())	/* Headless/memless IO nodes */
+		hubdev_info =
+		    (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(0),
+							     sizeof(struct
+								    hubdev_info));
+	else
+		hubdev_info =
+		    (struct hubdev_info *)alloc_bootmem_node(NODE_DATA(node),
+							     sizeof(struct
+								    hubdev_info));
+	npda->pdinfo = (void *)hubdev_info;
+
+}
+
+geoid_t
+cnodeid_get_geoid(cnodeid_t cnode)
+{
+
+	struct hubdev_info *hubdev;
+
+	hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+	return hubdev->hdi_geoid;
+
+}
+
+subsys_initcall(sn_pci_init);
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
new file mode 100644
index 0000000..fec6d8b
--- /dev/null
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -0,0 +1,70 @@
+/* 
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/shub_mmr.h>
+
+/**
+ * sn_io_addr - convert an in/out port to an i/o address
+ * @port: port to convert
+ *
+ * Legacy in/out instructions are converted to ld/st instructions
+ * on IA64.  This routine will convert a port number into a valid 
+ * SN i/o address.  Used by sn_in*() and sn_out*().
+ */
+void *sn_io_addr(unsigned long port)
+{
+	if (!IS_RUNNING_ON_SIMULATOR()) {
+		/* On sn2, legacy I/O ports don't point at anything */
+		if (port < (64 * 1024))
+			return NULL;
+		return ((void *)(port | __IA64_UNCACHED_OFFSET));
+	} else {
+		/* but the simulator uses them... */
+		unsigned long addr;
+
+		/*
+		 * word align port, but need more than 10 bits
+		 * for accessing registers in bedrock local block
+		 * (so we don't do port&0xfff)
+		 */
+		addr = (is_shub2() ? 0xc00000028c000000UL : 0xc0000087cc000000UL) | ((port >> 2) << 12);
+		if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6 || port == 0x3f7)
+			addr |= port;
+		return (void *)addr;
+	}
+}
+
+EXPORT_SYMBOL(sn_io_addr);
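+
+/*
+ * A minimal usage sketch (illustrative only, kept under #if 0 so it is
+ * never built): legacy port numbers below 64K yield NULL on real sn2
+ * hardware, so callers must check the result.  Port 0x3f8 (COM1) is an
+ * arbitrary example.
+ */
+#if 0
+static int sn_io_addr_example(void)
+{
+	void *addr = sn_io_addr(0x3f8);
+
+	return addr != NULL;	/* 0 on hardware, 1 on the simulator */
+}
+#endif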
+
+/**
+ * __sn_mmiowb - I/O space memory barrier
+ *
+ * See include/asm-ia64/io.h and Documentation/DocBook/deviceiobook.tmpl
+ * for details.
+ *
+ * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
+ * See PV 871084 for details about the WAR about zero value.
+ *
+ */
+void __sn_mmiowb(void)
+{
+	volatile unsigned long *adr = pda->pio_write_status_addr;
+	unsigned long val = pda->pio_write_status_val;
+
+	while ((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val)
+		cpu_relax();
+}
+
+EXPORT_SYMBOL(__sn_mmiowb);
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
new file mode 100644
index 0000000..3be4472
--- /dev/null
+++ b/arch/ia64/sn/kernel/irq.c
@@ -0,0 +1,431 @@
+/*
+ * Platform dependent support for SGI SN
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#include <linux/irq.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include "xtalk/xwidgetdev.h"
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/pcibr_provider.h"
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/sn_sal.h>
+
+static void force_interrupt(int irq);
+static void register_intr_pda(struct sn_irq_info *sn_irq_info);
+static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
+
+extern int sn_force_interrupt_flag;
+extern int sn_ioif_inited;
+struct sn_irq_info **sn_irq;
+
+static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
+				     u64 sn_irq_info,
+				     int req_irq, nasid_t req_nasid,
+				     int req_slice)
+{
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+			(u64) SAL_INTR_ALLOC, (u64) local_nasid,
+			(u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
+			(u64) req_nasid, (u64) req_slice);
+	return ret_stuff.status;
+}
+
+static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
+				struct sn_irq_info *sn_irq_info)
+{
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+			(u64) SAL_INTR_FREE, (u64) local_nasid,
+			(u64) local_widget, (u64) sn_irq_info->irq_irq,
+			(u64) sn_irq_info->irq_cookie, 0, 0);
+}
+
+static unsigned int sn_startup_irq(unsigned int irq)
+{
+	return 0;
+}
+
+static void sn_shutdown_irq(unsigned int irq)
+{
+}
+
+static void sn_disable_irq(unsigned int irq)
+{
+}
+
+static void sn_enable_irq(unsigned int irq)
+{
+}
+
+static void sn_ack_irq(unsigned int irq)
+{
+	uint64_t event_occurred, mask = 0;
+	int nasid;
+
+	irq = irq & 0xff;
+	nasid = get_nasid();
+	event_occurred =
+	    HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
+	if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
+		mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
+	}
+	if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
+		mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
+	}
+	if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
+		mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
+	}
+	if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
+		mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
+	}
+	HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
+	      mask);
+	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
+
+	move_irq(irq);
+}
+
+static void sn_end_irq(unsigned int irq)
+{
+	int nasid;
+	int ivec;
+	uint64_t event_occurred;
+
+	ivec = irq & 0xff;
+	if (ivec == SGI_UART_VECTOR) {
+		nasid = get_nasid();
+		event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
+				       (nasid, SH_EVENT_OCCURRED));
+		/* If the UART bit is set here, we may have received an 
+		 * interrupt from the UART that the driver missed.  To
+		 * make sure, we IPI ourselves to force us to look again.
+		 */
+		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
+			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
+					  IA64_IPI_DM_INT, 0);
+		}
+	}
+	__clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
+	if (sn_force_interrupt_flag)
+		force_interrupt(irq);
+}
+
+static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+	struct sn_irq_info *sn_irq_info = sn_irq[irq];
+	struct sn_irq_info *tmp_sn_irq_info;
+	int cpuid, cpuphys;
+	nasid_t t_nasid;	/* nasid to target */
+	int t_slice;		/* slice to target */
+
+	/* allocate a temp sn_irq_info struct to get new target info */
+	tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL);
+	if (!tmp_sn_irq_info)
+		return;
+
+	cpuid = first_cpu(mask);
+	cpuphys = cpu_physical_id(cpuid);
+	t_nasid = cpuid_to_nasid(cpuid);
+	t_slice = cpuid_to_slice(cpuid);
+
+	while (sn_irq_info) {
+		int status;
+		int local_widget;
+		uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
+		nasid_t local_nasid = NASID_GET(bridge);
+
+		if (!bridge)
+			break;	/* irq is not a device interrupt */
+
+		if (local_nasid & 1)
+			local_widget = TIO_SWIN_WIDGETNUM(bridge);
+		else
+			local_widget = SWIN_WIDGETNUM(bridge);
+
+		/* Free the old PROM sn_irq_info structure */
+		sn_intr_free(local_nasid, local_widget, sn_irq_info);
+
+		/* allocate a new PROM sn_irq_info struct */
+		status = sn_intr_alloc(local_nasid, local_widget,
+				       __pa(tmp_sn_irq_info), irq, t_nasid,
+				       t_slice);
+
+		if (status == 0) {
+			/* Update the kernel's sn_irq_info with the new target info */
+			unregister_intr_pda(sn_irq_info);
+			sn_irq_info->irq_cpuid = cpuid;
+			sn_irq_info->irq_nasid = t_nasid;
+			sn_irq_info->irq_slice = t_slice;
+			sn_irq_info->irq_xtalkaddr =
+			    tmp_sn_irq_info->irq_xtalkaddr;
+			sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie;
+			register_intr_pda(sn_irq_info);
+
+			if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) {
+				pcibr_change_devices_irq(sn_irq_info);
+			}
+
+			sn_irq_info = sn_irq_info->irq_next;
+
+#ifdef CONFIG_SMP
+			set_irq_affinity_info((irq & 0xff), cpuphys, 0);
+#endif
+		} else {
+			break;	/* sn_intr_alloc() failed, abort the affinity change */
+		}
+	}
+	kfree(tmp_sn_irq_info);
+}
+
+struct hw_interrupt_type irq_type_sn = {
+	"SN hub",
+	sn_startup_irq,
+	sn_shutdown_irq,
+	sn_enable_irq,
+	sn_disable_irq,
+	sn_ack_irq,
+	sn_end_irq,
+	sn_set_affinity_irq
+};
+
+unsigned int sn_local_vector_to_irq(u8 vector)
+{
+	return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
+}
+
+void sn_irq_init(void)
+{
+	int i;
+	irq_desc_t *base_desc = irq_desc;
+
+	for (i = 0; i < NR_IRQS; i++) {
+		if (base_desc[i].handler == &no_irq_type) {
+			base_desc[i].handler = &irq_type_sn;
+		}
+	}
+}
+
+static void register_intr_pda(struct sn_irq_info *sn_irq_info)
+{
+	int irq = sn_irq_info->irq_irq;
+	int cpu = sn_irq_info->irq_cpuid;
+
+	if (pdacpu(cpu)->sn_last_irq < irq) {
+		pdacpu(cpu)->sn_last_irq = irq;
+	}
+
+	if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
+		pdacpu(cpu)->sn_first_irq = irq;
+	}
+}
+
+static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
+{
+	int irq = sn_irq_info->irq_irq;
+	int cpu = sn_irq_info->irq_cpuid;
+	struct sn_irq_info *tmp_irq_info;
+	int i, foundmatch;
+
+	if (pdacpu(cpu)->sn_last_irq == irq) {
+		foundmatch = 0;
+		for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) {
+			tmp_irq_info = sn_irq[i];
+			while (tmp_irq_info) {
+				if (tmp_irq_info->irq_cpuid == cpu) {
+					foundmatch++;
+					break;
+				}
+				tmp_irq_info = tmp_irq_info->irq_next;
+			}
+			if (foundmatch) {
+				break;
+			}
+		}
+		pdacpu(cpu)->sn_last_irq = i;
+	}
+
+	if (pdacpu(cpu)->sn_first_irq == irq) {
+		foundmatch = 0;
+		for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) {
+			tmp_irq_info = sn_irq[i];
+			while (tmp_irq_info) {
+				if (tmp_irq_info->irq_cpuid == cpu) {
+					foundmatch++;
+					break;
+				}
+				tmp_irq_info = tmp_irq_info->irq_next;
+			}
+			if (foundmatch) {
+				break;
+			}
+		}
+		pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
+	}
+}
+
+struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq,
+				 nasid_t nasid, int slice)
+{
+	struct sn_irq_info *sn_irq_info;
+	int status;
+
+	sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL);
+	if (sn_irq_info == NULL)
+		return NULL;
+
+	memset(sn_irq_info, 0x0, sizeof(*sn_irq_info));
+
+	status =
+	    sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq,
+			  nasid, slice);
+
+	if (status) {
+		kfree(sn_irq_info);
+		return NULL;
+	} else {
+		return sn_irq_info;
+	}
+}
+
+void sn_irq_free(struct sn_irq_info *sn_irq_info)
+{
+	uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
+	nasid_t local_nasid = NASID_GET(bridge);
+	int local_widget;
+
+	if (local_nasid & 1)	/* tio check */
+		local_widget = TIO_SWIN_WIDGETNUM(bridge);
+	else
+		local_widget = SWIN_WIDGETNUM(bridge);
+
+	sn_intr_free(local_nasid, local_widget, sn_irq_info);
+
+	kfree(sn_irq_info);
+}
+
+void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
+{
+	nasid_t nasid = sn_irq_info->irq_nasid;
+	int slice = sn_irq_info->irq_slice;
+	int cpu = nasid_slice_to_cpuid(nasid, slice);
+
+	sn_irq_info->irq_cpuid = cpu;
+	sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
+
+	/* link it into the sn_irq[irq] list */
+	sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq];
+	sn_irq[sn_irq_info->irq_irq] = sn_irq_info;
+
+	(void)register_intr_pda(sn_irq_info);
+}
+
+static void force_interrupt(int irq)
+{
+	struct sn_irq_info *sn_irq_info;
+
+	if (!sn_ioif_inited)
+		return;
+	sn_irq_info = sn_irq[irq];
+	while (sn_irq_info) {
+		if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
+		    (sn_irq_info->irq_bridge != NULL)) {
+			pcibr_force_interrupt(sn_irq_info);
+		}
+		sn_irq_info = sn_irq_info->irq_next;
+	}
+}
+
+/*
+ * Check for lost interrupts.  If the PIC int_status reg. says that
+ * an interrupt has been sent, but not handled, and the interrupt
+ * is not pending in either the cpu irr regs or in the soft irr regs,
+ * and the interrupt is not in service, then the interrupt may have
+ * been lost.  Force an interrupt on that pin.  It is possible that
+ * the interrupt is in flight, so we may generate a spurious interrupt,
+ * but we should never miss a real lost interrupt.
+ */
+static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
+{
+	uint64_t regval;
+	int irr_reg_num;
+	int irr_bit;
+	uint64_t irr_reg;
+	struct pcidev_info *pcidev_info;
+	struct pcibus_info *pcibus_info;
+
+	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+	if (!pcidev_info)
+		return;
+
+	pcibus_info =
+	    (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
+	    pdi_pcibus_info;
+	regval = pcireg_intr_status_get(pcibus_info);
+
+	irr_reg_num = irq_to_vector(irq) / 64;
+	irr_bit = irq_to_vector(irq) % 64;
+	switch (irr_reg_num) {
+	case 0:
+		irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
+		break;
+	case 1:
+		irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
+		break;
+	case 2:
+		irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
+		break;
+	case 3:
+		irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
+		break;
+	}
+	if (!test_bit(irr_bit, &irr_reg)) {
+		if (!test_bit(irq, pda->sn_soft_irr)) {
+			if (!test_bit(irq, pda->sn_in_service_ivecs)) {
+				regval &= 0xff;
+				if (sn_irq_info->irq_int_bit & regval &
+				    sn_irq_info->irq_last_intr) {
+					regval &=
+					    ~(sn_irq_info->
+					      irq_int_bit & regval);
+					pcibr_force_interrupt(sn_irq_info);
+				}
+			}
+		}
+	}
+	sn_irq_info->irq_last_intr = regval;
+}
+
+void sn_lb_int_war_check(void)
+{
+	int i;
+
+	if (!sn_ioif_inited || pda->sn_first_irq == 0)
+		return;
+	for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
+		struct sn_irq_info *sn_irq_info = sn_irq[i];
+		while (sn_irq_info) {
+			/* Only call for PCI bridges that are fully initialized. */
+			if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
+			    (sn_irq_info->irq_bridge != NULL)) {
+				sn_check_intr(i, sn_irq_info);
+			}
+			sn_irq_info = sn_irq_info->irq_next;
+		}
+	}
+}
diff --git a/arch/ia64/sn/kernel/klconflib.c b/arch/ia64/sn/kernel/klconflib.c
new file mode 100644
index 0000000..0f11a32
--- /dev/null
+++ b/arch/ia64/sn/kernel/klconflib.c
@@ -0,0 +1,108 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <asm/sn/types.h>
+#include <asm/sn/module.h>
+#include <asm/sn/l1.h>
+
+char brick_types[MAX_BRICK_TYPES + 1] = "cri.xdpn%#=vo^kjbf890123456789...";
+/*
+ * Format a module id for printing.
+ *
+ * There are three possible formats:
+ *
+ *   MODULE_FORMAT_BRIEF	is the brief 6-character format, including
+ *				the actual brick-type as recorded in the 
+ *				moduleid_t, eg. 002c15 for a C-brick, or
+ *				101#17 for a PX-brick.
+ *
+ *   MODULE_FORMAT_LONG		is the hwgraph format, eg. rack/002/bay/15
+ *				or rack/101/bay/17 (note that the brick
+ *				type does not appear in this format).
+ *
+ *   MODULE_FORMAT_LCD		is like MODULE_FORMAT_BRIEF, except that it
+ *				ensures that the module id provided appears
+ *				exactly as it would on the LCD display of
+ *				the corresponding brick, eg. still 002c15
+ *				for a C-brick, but 101p17 for a PX-brick.
+ *
+ * maule (9/13/04):  Removed top-level check for (fmt == MODULE_FORMAT_LCD)
+ * making MODULE_FORMAT_LCD equivalent to MODULE_FORMAT_BRIEF.  It was
+ * decided that all callers should assume the returned string should be what
+ * is displayed on the brick L1 LCD.
+ */
+void
+format_module_id(char *buffer, moduleid_t m, int fmt)
+{
+	int rack, position;
+	unsigned char brickchar;
+
+	rack = MODULE_GET_RACK(m);
+	brickchar = MODULE_GET_BTCHAR(m);
+
+	/* Be sure we use the same brick type character as displayed
+	 * on the brick's LCD
+	 */
+	switch (brickchar) 
+	{
+	case L1_BRICKTYPE_GA:
+	case L1_BRICKTYPE_OPUS_TIO:
+		brickchar = L1_BRICKTYPE_C;
+		break;
+
+	case L1_BRICKTYPE_PX:
+	case L1_BRICKTYPE_PE:
+	case L1_BRICKTYPE_PA:
+	case L1_BRICKTYPE_SA: /* we can move this to the "I's" later
+			       * if that makes more sense
+			       */
+		brickchar = L1_BRICKTYPE_P;
+		break;
+
+	case L1_BRICKTYPE_IX:
+	case L1_BRICKTYPE_IA:
+
+		brickchar = L1_BRICKTYPE_I;
+		break;
+	}
+
+	position = MODULE_GET_BPOS(m);
+
+	if ((fmt == MODULE_FORMAT_BRIEF) || (fmt == MODULE_FORMAT_LCD)) {
+	    /* Brief module number format, eg. 002c15 */
+
+	    /* Decompress the rack number */
+	    *buffer++ = '0' + RACK_GET_CLASS(rack);
+	    *buffer++ = '0' + RACK_GET_GROUP(rack);
+	    *buffer++ = '0' + RACK_GET_NUM(rack);
+
+	    /* Add the brick type */
+	    *buffer++ = brickchar;
+	}
+	else if (fmt == MODULE_FORMAT_LONG) {
+	    /* Fuller hwgraph format, eg. rack/002/bay/15 */
+
+	    strcpy(buffer, "rack" "/");  buffer += strlen(buffer);
+
+	    *buffer++ = '0' + RACK_GET_CLASS(rack);
+	    *buffer++ = '0' + RACK_GET_GROUP(rack);
+	    *buffer++ = '0' + RACK_GET_NUM(rack);
+
+	    strcpy(buffer, "/" "bay" "/");  buffer += strlen(buffer);
+	}
+
+	/* Add the bay position, using at least two digits */
+	if (position < 10)
+	    *buffer++ = '0';
+	sprintf(buffer, "%d", position);
+
+}
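+
+/*
+ * A minimal usage sketch (illustrative only, kept under #if 0 so it is
+ * never built): formatting one moduleid_t in both the brief and the
+ * hwgraph forms.  Buffer sizes are illustrative; callers must provide
+ * storage large enough for the chosen format.
+ */
+#if 0
+static void format_module_id_example(moduleid_t m)
+{
+	char brief[8], hwgraph[16];
+
+	format_module_id(brief, m, MODULE_FORMAT_BRIEF);  /* e.g. "002c15" */
+	format_module_id(hwgraph, m, MODULE_FORMAT_LONG); /* e.g. "rack/002/bay/15" */
+}
+#endif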
diff --git a/arch/ia64/sn/kernel/machvec.c b/arch/ia64/sn/kernel/machvec.c
new file mode 100644
index 0000000..02bb915
--- /dev/null
+++ b/arch/ia64/sn/kernel/machvec.c
@@ -0,0 +1,11 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2002-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#define MACHVEC_PLATFORM_NAME	sn2
+#define MACHVEC_PLATFORM_HEADER	<asm/machvec_sn2.h>
+#include <asm/machvec_init.h>
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
new file mode 100644
index 0000000..857774b
--- /dev/null
+++ b/arch/ia64/sn/kernel/mca.c
@@ -0,0 +1,135 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/vmalloc.h>
+#include <asm/mca.h>
+#include <asm/sal.h>
+#include <asm/sn/sn_sal.h>
+
+/*
+ * Interval for calling SAL to poll for errors that do NOT cause error
+ * interrupts. SAL will raise a CPEI if any errors are present that
+ * need to be logged.
+ */
+#define CPEI_INTERVAL	(5*HZ)
+
+struct timer_list sn_cpei_timer;
+void sn_init_cpei_timer(void);
+
+/* Printing oemdata from mca uses data that is not passed through SAL; it is
+ * global.  Only one user at a time.
+ */
+static DECLARE_MUTEX(sn_oemdata_mutex);
+static u8 **sn_oemdata;
+static u64 *sn_oemdata_size, sn_oemdata_bufsize;
+
+/*
+ * print_hook
+ *
+ * This function is the callback routine that SAL calls to log error
+ * info for platform errors.  buf is appended to sn_oemdata, resizing as
+ * required.
+ */
+static int print_hook(const char *fmt, ...)
+{
+	char buf[400];
+	int len;
+	va_list args;
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, args);
+	va_end(args);
+	len = strlen(buf);
+	while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) {
+		u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000);
+		if (!newbuf) {
+			printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
+			       __FUNCTION__);
+			return 0;
+		}
+		memcpy(newbuf, *sn_oemdata, *sn_oemdata_size);
+		vfree(*sn_oemdata);
+		*sn_oemdata = newbuf;
+	}
+	memcpy(*sn_oemdata + *sn_oemdata_size, buf, len + 1);
+	*sn_oemdata_size += len;
+	return 0;
+}
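+
+/*
+ * Minimal sketch of the callback contract (illustration only): SAL
+ * invokes the hook exactly like printf, and the hook appends each
+ * formatted line to *sn_oemdata, growing the buffer in 1000-byte
+ * vmalloc increments as needed.
+ */
+#if 0
+static void example_emit_record(void)
+{
+	print_hook("OEM data record %d: %s\n", 1, "corrected memory error");
+}
+#endif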
+
+static void sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
+{
+	/*
+	 * this function's sole purpose is to call SAL when we receive
+	 * a CE interrupt from SHUB or when the timer routine decides
+	 * we need to call SAL to check for CEs.
+	 */
+
+	/* CALL SAL_LOG_CE */
+
+	ia64_sn_plat_cpei_handler();
+}
+
+static void sn_cpei_timer_handler(unsigned long dummy)
+{
+	sn_cpei_handler(-1, NULL, NULL);
+	mod_timer(&sn_cpei_timer, jiffies + CPEI_INTERVAL);
+}
+
+void sn_init_cpei_timer(void)
+{
+	init_timer(&sn_cpei_timer);
+	sn_cpei_timer.expires = jiffies + CPEI_INTERVAL;
+	sn_cpei_timer.function = sn_cpei_timer_handler;
+	add_timer(&sn_cpei_timer);
+}
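+
+/*
+ * Note that the timer is self-rearming: sn_cpei_timer_handler() calls
+ * mod_timer() after each poll, so SAL is polled for unlogged errors
+ * every CPEI_INTERVAL (5 seconds) for the life of the system.
+ */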
+
+static int
+sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
+				    u64 * oemdata_size)
+{
+	down(&sn_oemdata_mutex);
+	sn_oemdata = oemdata;
+	sn_oemdata_size = oemdata_size;
+	sn_oemdata_bufsize = 0;
+	ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
+	up(&sn_oemdata_mutex);
+	return 0;
+}
+
+/* Callback when userspace salinfo wants to decode oem data via the platform
+ * kernel and/or prom.
+ */
+int sn_salinfo_platform_oemdata(const u8 *sect_header, u8 **oemdata, u64 *oemdata_size)
+{
+	efi_guid_t guid = *(efi_guid_t *)sect_header;
+	int valid = 0;
+	*oemdata_size = 0;
+	vfree(*oemdata);
+	*oemdata = NULL;
+	if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0) {
+		sal_log_plat_specific_err_info_t *psei = (sal_log_plat_specific_err_info_t *)sect_header;
+		valid = psei->valid.oem_data;
+	} else if (efi_guidcmp(guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID) == 0) {
+		sal_log_mem_dev_err_info_t *mdei = (sal_log_mem_dev_err_info_t *)sect_header;
+		valid = mdei->valid.oem_data;
+	}
+	if (valid)
+		return sn_platform_plat_specific_err_print(sect_header, oemdata, oemdata_size);
+	else
+		return 0;
+}
+
+static int __init sn_salinfo_init(void)
+{
+	salinfo_platform_oemdata = &sn_salinfo_platform_oemdata;
+	return 0;
+}
+
+module_init(sn_salinfo_init)
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
new file mode 100644
index 0000000..f0306b5
--- /dev/null
+++ b/arch/ia64/sn/kernel/setup.c
@@ -0,0 +1,621 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/timex.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/serial.h>
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+#include <linux/mmzone.h>
+#include <linux/interrupt.h>
+#include <linux/acpi.h>
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/root_dev.h>
+#include <linux/nodemask.h>
+
+#include <asm/io.h>
+#include <asm/sal.h>
+#include <asm/machvec.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/leds.h>
+#include <asm/sn/bte.h>
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/clksupport.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/geo.h>
+#include "xtalk/xwidgetdev.h"
+#include "xtalk/hubdev.h"
+#include <asm/sn/klconfig.h>
+
+
+DEFINE_PER_CPU(struct pda_s, pda_percpu);
+
+#define MAX_PHYS_MEMORY		(1UL << 49)	/* 512 TB */
+
+lboard_t *root_lboard[MAX_COMPACT_NODES];
+
+extern void bte_init_node(nodepda_t *, cnodeid_t);
+
+extern void sn_timer_init(void);
+extern unsigned long last_time_offset;
+extern void (*ia64_mark_idle) (int);
+extern void snidle(int);
+extern unsigned char acpi_kbd_controller_present;
+
+unsigned long sn_rtc_cycles_per_second;
+EXPORT_SYMBOL(sn_rtc_cycles_per_second);
+
+DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
+EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
+
+partid_t sn_partid = -1;
+EXPORT_SYMBOL(sn_partid);
+char sn_system_serial_number_string[128];
+EXPORT_SYMBOL(sn_system_serial_number_string);
+u64 sn_partition_serial_number;
+EXPORT_SYMBOL(sn_partition_serial_number);
+u8 sn_partition_id;
+EXPORT_SYMBOL(sn_partition_id);
+u8 sn_system_size;
+EXPORT_SYMBOL(sn_system_size);
+u8 sn_sharing_domain_size;
+EXPORT_SYMBOL(sn_sharing_domain_size);
+u8 sn_coherency_id;
+EXPORT_SYMBOL(sn_coherency_id);
+u8 sn_region_size;
+EXPORT_SYMBOL(sn_region_size);
+
+short physical_node_map[MAX_PHYSNODE_ID];
+
+EXPORT_SYMBOL(physical_node_map);
+
+int numionodes;
+
+static void sn_init_pdas(char **);
+static void scan_for_ionodes(void);
+
+static nodepda_t *nodepdaindr[MAX_COMPACT_NODES];
+
+/*
+ * The format of "screen_info" is strange, and due to early i386-setup
+ * code. This is just enough to make the console code think we're on a
+ * VGA color display.
+ */
+struct screen_info sn_screen_info = {
+	.orig_x = 0,
+	.orig_y = 0,
+	.orig_video_mode = 3,
+	.orig_video_cols = 80,
+	.orig_video_ega_bx = 3,
+	.orig_video_lines = 25,
+	.orig_video_isVGA = 1,
+	.orig_video_points = 16
+};
+
+/*
+ * This is here so we can use the CMOS detection in ide-probe.c to
+ * determine what drives are present.  In theory, we don't need this
+ * as the auto-detection could be done via ide-probe.c:do_probe() but
+ * in practice that would be much slower, which is painful when
+ * running in the simulator.  Note that passing zeroes in DRIVE_INFO
+ * is sufficient (the IDE driver will autodetect the drive geometry).
+ */
+#ifdef CONFIG_IA64_GENERIC
+extern char drive_info[4 * 16];
+#else
+char drive_info[4 * 16];
+#endif
+
+/*
+ * Get nasid of current cpu early in boot before nodepda is initialized
+ */
+static int
+boot_get_nasid(void)
+{
+	int nasid;
+
+	if (ia64_sn_get_sapic_info(get_sapicid(), &nasid, NULL, NULL))
+		BUG();
+	return nasid;
+}
+
+/*
+ * This routine can only be used during init, since it walks the
+ * node_memblk init data.  It maps an ACPI proximity domain (pxm) to a
+ * NASID by finding the first memory block on the corresponding node
+ * and extracting the NASID from the block's physical address.
+ */
+
+static int __init pxm_to_nasid(int pxm)
+{
+	int i;
+	int nid;
+
+	nid = pxm_to_nid_map[pxm];
+	for (i = 0; i < num_node_memblks; i++) {
+		if (node_memblk[i].nid == nid) {
+			return NASID_GET(node_memblk[i].start_paddr);
+		}
+	}
+	return -1;
+}
+
+/**
+ * early_sn_setup - early setup routine for SN platforms
+ *
+ * Sets up an initial console to aid debugging.  Intended primarily
+ * for bringup.  See start_kernel() in init/main.c.
+ */
+
+void __init early_sn_setup(void)
+{
+	efi_system_table_t *efi_systab;
+	efi_config_table_t *config_tables;
+	struct ia64_sal_systab *sal_systab;
+	struct ia64_sal_desc_entry_point *ep;
+	char *p;
+	int i, j;
+
+	/*
+	 * Parse enough of the SAL tables to locate the SAL entry point.  Since console
+	 * IO on SN2 is done via SAL calls, early_printk won't work without this.
+	 *
+	 * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
+	 * Any changes to those files may have to be made here as well.
+	 */
+	efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
+	config_tables = __va(efi_systab->tables);
+	for (i = 0; i < efi_systab->nr_tables; i++) {
+		if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) ==
+		    0) {
+			sal_systab = __va(config_tables[i].table);
+			p = (char *)(sal_systab + 1);
+			for (j = 0; j < sal_systab->entry_count; j++) {
+				if (*p == SAL_DESC_ENTRY_POINT) {
+					ep = (struct ia64_sal_desc_entry_point *)p;
+					ia64_sal_handler_init(__va(ep->sal_proc),
+							      __va(ep->gp));
+					return;
+				}
+				p += SAL_DESC_SIZE(*p);
+			}
+		}
+	}
+	/* Uh-oh, SAL not available?? */
+	printk(KERN_ERR "failed to find SAL entry point\n");
+}
+
+extern int platform_intr_list[];
+extern nasid_t master_nasid;
+static int shub_1_1_found __initdata;
+
+/*
+ * sn_check_for_wars
+ *
+ * Set flag for enabling shub specific wars
+ */
+
+static inline int __init is_shub_1_1(int nasid)
+{
+	unsigned long id;
+	int rev;
+
+	if (is_shub2())
+		return 0;
+	id = REMOTE_HUB_L(nasid, SH1_SHUB_ID);
+	rev = (id & SH1_SHUB_ID_REVISION_MASK) >> SH1_SHUB_ID_REVISION_SHFT;
+	return rev <= 2;
+}
+
+static void __init sn_check_for_wars(void)
+{
+	int cnode;
+
+	if (is_shub2()) {
+		/* none yet */
+	} else {
+		for_each_online_node(cnode) {
+			if (is_shub_1_1(cnodeid_to_nasid(cnode)))
+				sn_hub_info->shub_1_1_found = 1;
+		}
+	}
+}
+
+/**
+ * sn_setup - SN platform setup routine
+ * @cmdline_p: kernel command line
+ *
+ * Handles platform setup for SN machines.  This includes determining
+ * the RTC frequency (via a SAL call), initializing secondary CPUs, and
+ * setting up per-node data areas.  The console is also initialized here.
+ */
+void __init sn_setup(char **cmdline_p)
+{
+	long status, ticks_per_sec, drift;
+	int pxm;
+	int major = sn_sal_rev_major(), minor = sn_sal_rev_minor();
+	extern void sn_cpu_init(void);
+
+	/*
+	 * If the generic code has enabled vga console support - let's
+	 * get rid of it again.  This is a kludge for the fact that ACPI
+	 * currently has no way of informing us if legacy VGA is available
+	 * or not.
+	 */
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
+	if (conswitchp == &vga_con) {
+		printk(KERN_DEBUG "SGI: Disabling VGA console\n");
+#ifdef CONFIG_DUMMY_CONSOLE
+		conswitchp = &dummy_con;
+#else
+		conswitchp = NULL;
+#endif				/* CONFIG_DUMMY_CONSOLE */
+	}
+#endif				/* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */
+
+	MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
+
+	memset(physical_node_map, -1, sizeof(physical_node_map));
+	for (pxm = 0; pxm < MAX_PXM_DOMAINS; pxm++)
+		if (pxm_to_nid_map[pxm] != -1)
+			physical_node_map[pxm_to_nasid(pxm)] =
+			    pxm_to_nid_map[pxm];
+
+	/*
+	 * Old PROMs do not provide an ACPI FADT. Disable legacy keyboard
+	 * support here so we don't have to listen to failed keyboard probe
+	 * messages.
+	 */
+	if ((major < 2 || (major == 2 && minor <= 9)) &&
+	    acpi_kbd_controller_present) {
+		printk(KERN_INFO "Disabling legacy keyboard support as prom "
+		       "is too old and doesn't provide FADT\n");
+		acpi_kbd_controller_present = 0;
+	}
+
+	printk("SGI SAL version %x.%02x\n", major, minor);
+
+	/*
+	 * Confirm the SAL we're running on is recent enough...
+	 */
+	if ((major < SN_SAL_MIN_MAJOR) || (major == SN_SAL_MIN_MAJOR &&
+					   minor < SN_SAL_MIN_MINOR)) {
+		printk(KERN_ERR "This kernel needs SGI SAL version >= "
+		       "%x.%02x\n", SN_SAL_MIN_MAJOR, SN_SAL_MIN_MINOR);
+		panic("PROM version too old\n");
+	}
+
+	master_nasid = boot_get_nasid();
+
+	status =
+	    ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
+			       &drift);
+	if (status != 0 || ticks_per_sec < 100000) {
+		printk(KERN_WARNING
+		       "unable to determine platform RTC clock frequency, guessing.\n");
+		/* PROM gives wrong value for clock freq. so guess */
+		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
+	} else
+		sn_rtc_cycles_per_second = ticks_per_sec;
+
+	platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
+
+	/*
+	 * we set the default root device to /dev/hda
+	 * to make simulation easy
+	 */
+	ROOT_DEV = Root_HDA1;
+
+	/*
+	 * Create the PDAs and NODEPDAs for all the cpus.
+	 */
+	sn_init_pdas(cmdline_p);
+
+	ia64_mark_idle = &snidle;
+
+	/* 
+	 * For the bootcpu, we do this here. All other cpus will make the
+	 * call as part of cpu_init in slave cpu initialization.
+	 */
+	sn_cpu_init();
+
+#ifdef CONFIG_SMP
+	init_smp_config();
+#endif
+	screen_info = sn_screen_info;
+
+	sn_timer_init();
+}
+
+/**
+ * sn_init_pdas - setup node data areas
+ *
+ * One time setup for Node Data Area.  Called by sn_setup().
+ */
+static void __init sn_init_pdas(char **cmdline_p)
+{
+	cnodeid_t cnode;
+
+	memset(pda->cnodeid_to_nasid_table, -1,
+	       sizeof(pda->cnodeid_to_nasid_table));
+	for_each_online_node(cnode)
+		pda->cnodeid_to_nasid_table[cnode] =
+		    pxm_to_nasid(nid_to_pxm_map[cnode]);
+
+	numionodes = num_online_nodes();
+	scan_for_ionodes();
+
+	/*
+	 * Allocate & initialize the nodepda for each node.
+	 */
+	for_each_online_node(cnode) {
+		nodepdaindr[cnode] =
+		    alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
+		memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
+		memset(nodepdaindr[cnode]->phys_cpuid, -1, 
+		    sizeof(nodepdaindr[cnode]->phys_cpuid));
+	}
+
+	/*
+	 * Allocate & initialize nodepda for TIOs.  For now, put them on node 0.
+	 */
+	for (cnode = num_online_nodes(); cnode < numionodes; cnode++) {
+		nodepdaindr[cnode] =
+		    alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
+		memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
+	}
+
+	/*
+	 * Now copy the array of nodepda pointers to each nodepda.
+	 */
+	for (cnode = 0; cnode < numionodes; cnode++)
+		memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
+		       sizeof(nodepdaindr));
+
+	/*
+	 * Set up IO related platform-dependent nodepda fields.
+	 * The following routine actually sets up the hubinfo struct
+	 * in nodepda.
+	 */
+	for_each_online_node(cnode) {
+		bte_init_node(nodepdaindr[cnode], cnode);
+	}
+
+	/*
+	 * Initialize the per node hubdev.  This includes IO Nodes and 
+	 * headless/memless nodes.
+	 */
+	for (cnode = 0; cnode < numionodes; cnode++) {
+		hubdev_init_node(nodepdaindr[cnode], cnode);
+	}
+}
+
+/**
+ * sn_cpu_init - initialize per-cpu data areas
+ * @cpuid: cpuid of the caller
+ *
+ * Called during cpu initialization on each cpu as it starts.
+ * Currently, initializes the per-cpu data area for SNIA.
+ * Also sets up a few fields in the nodepda.  Also known as
+ * platform_cpu_init() by the ia64 machvec code.
+ */
+void __init sn_cpu_init(void)
+{
+	int cpuid;
+	int cpuphyid;
+	int nasid;
+	int subnode;
+	int slice;
+	int cnode;
+	int i;
+	static int wars_have_been_checked;
+
+	memset(pda, 0, sizeof(*pda));	/* sizeof(pda) would only clear pointer-size bytes */
+	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift,
+				&sn_system_size, &sn_sharing_domain_size, &sn_partition_id,
+				&sn_coherency_id, &sn_region_size))
+		BUG();
+	sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
+
+	/*
+	 * The boot cpu makes this call again after platform initialization is
+	 * complete.
+	 */
+	if (nodepdaindr[0] == NULL)
+		return;
+
+	cpuid = smp_processor_id();
+	cpuphyid = get_sapicid();
+
+	if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
+		BUG();
+
+	for (i=0; i < MAX_NUMNODES; i++) {
+		if (nodepdaindr[i]) {
+			nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
+			nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
+			nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
+		}
+	}
+
+	cnode = nasid_to_cnodeid(nasid);
+
+	pda->p_nodepda = nodepdaindr[cnode];
+	pda->led_address =
+	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
+	pda->led_state = LED_ALWAYS_SET;
+	pda->hb_count = HZ / 2;
+	pda->hb_state = 0;
+	pda->idle_flag = 0;
+
+	if (cpuid != 0) {
+		memcpy(pda->cnodeid_to_nasid_table,
+		       pdacpu(0)->cnodeid_to_nasid_table,
+		       sizeof(pda->cnodeid_to_nasid_table));
+	}
+
+	/*
+	 * Check for WARs.
+	 * Only needs to be done once, on BSP.
+	 * Has to be done after the loop above, because it uses pda->cnodeid_to_nasid_table[].
+	 * Has to be done before the assignment below.
+	 */
+	if (!wars_have_been_checked) {
+		sn_check_for_wars();
+		wars_have_been_checked = 1;
+	}
+	sn_hub_info->shub_1_1_found = shub_1_1_found;
+
+	/*
+	 * Set up addresses of PIO/MEM write status registers.
+	 */
+	{
+		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
+		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1, 
+			SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3};
+		u64 *pio;
+		pio = is_shub1() ? pio1 : pio2;
+		pda->pio_write_status_addr = (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]);
+		pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
+	}
+
+	/*
+	 * WAR addresses for SHUB 1.x.
+	 */
+	if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
+		int buddy_nasid;
+		buddy_nasid =
+		    cnodeid_to_nasid(numa_node_id() ==
+				     num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
+		pda->pio_shub_war_cam_addr =
+		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
+							      SH1_PI_CAM_CONTROL);
+	}
+}
+
+/*
+ * Scan klconfig for ionodes.  Add the nasids to the
+ * physical_node_map and the pda and increment numionodes.
+ */
+
+static void __init scan_for_ionodes(void)
+{
+	int nasid = 0;
+	lboard_t *brd;
+
+	/* Setup ionodes with memory */
+	for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
+		char *klgraph_header;
+		cnodeid_t cnodeid;
+
+		if (physical_node_map[nasid] == -1)
+			continue;
+
+		cnodeid = -1;
+		klgraph_header = __va(ia64_sn_get_klconfig_addr(nasid));
+		if (!klgraph_header) {
+			if (IS_RUNNING_ON_SIMULATOR())
+				continue;
+			BUG();	/* All nodes must have klconfig tables! */
+		}
+		cnodeid = nasid_to_cnodeid(nasid);
+		root_lboard[cnodeid] = (lboard_t *)
+		    NODE_OFFSET_TO_LBOARD(nasid,
+					  ((kl_config_hdr_t *)klgraph_header)->ch_board_info);
+	}
+
+	/* Scan headless/memless IO Nodes. */
+	for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
+		/* if there's no nasid, don't try to read the klconfig on the node */
+		if (physical_node_map[nasid] == -1)
+			continue;
+		brd = find_lboard_any((lboard_t *)
+				      root_lboard[nasid_to_cnodeid(nasid)],
+				      KLTYPE_SNIA);
+		if (brd) {
+			brd = KLCF_NEXT_ANY(brd);	/* Skip this node's lboard */
+			if (!brd)
+				continue;
+		}
+
+		brd = find_lboard_any(brd, KLTYPE_SNIA);
+
+		while (brd) {
+			pda->cnodeid_to_nasid_table[numionodes] =
+			    brd->brd_nasid;
+			physical_node_map[brd->brd_nasid] = numionodes;
+			root_lboard[numionodes] = brd;
+			numionodes++;
+			brd = KLCF_NEXT_ANY(brd);
+			if (!brd)
+				break;
+
+			brd = find_lboard_any(brd, KLTYPE_SNIA);
+		}
+	}
+
+	/* Scan for TIO nodes. */
+	for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
+		/* if there's no nasid, don't try to read the klconfig on the node */
+		if (physical_node_map[nasid] == -1)
+			continue;
+		brd = find_lboard_any((lboard_t *)
+				      root_lboard[nasid_to_cnodeid(nasid)],
+				      KLTYPE_TIO);
+		while (brd) {
+			pda->cnodeid_to_nasid_table[numionodes] =
+			    brd->brd_nasid;
+			physical_node_map[brd->brd_nasid] = numionodes;
+			root_lboard[numionodes] = brd;
+			numionodes++;
+			brd = KLCF_NEXT_ANY(brd);
+			if (!brd)
+				break;
+
+			brd = find_lboard_any(brd, KLTYPE_TIO);
+		}
+	}
+
+}
+
+int
+nasid_slice_to_cpuid(int nasid, int slice)
+{
+	long cpu;
+	
+	for (cpu=0; cpu < NR_CPUS; cpu++) 
+		if (nodepda->phys_cpuid[cpu].nasid == nasid && nodepda->phys_cpuid[cpu].slice == slice)
+			return cpu;
+
+	return -1;
+}
diff --git a/arch/ia64/sn/kernel/sn2/Makefile b/arch/ia64/sn/kernel/sn2/Makefile
new file mode 100644
index 0000000..170bde4
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/Makefile
@@ -0,0 +1,13 @@
+# arch/ia64/sn/kernel/sn2/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved.
+#
+# sn2 specific kernel files
+#
+
+obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
+	 prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o
diff --git a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c
new file mode 100644
index 0000000..bc3cfa1
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/cache.c
@@ -0,0 +1,34 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ * 
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
+ *
+ */
+#include <linux/module.h>
+#include <asm/pgalloc.h>
+
+/**
+ * sn_flush_all_caches - flush a range of addresses from all caches (incl. L4)
+ * @flush_addr: identity mapped region 7 address to start flushing
+ * @bytes: number of bytes to flush
+ *
+ * Flush a range of addresses from all caches including L4. 
+ * All addresses fully or partially contained within 
+ * @flush_addr to @flush_addr + @bytes are flushed
+ * from all caches.
+ */
+void
+sn_flush_all_caches(long flush_addr, long bytes)
+{
+	flush_icache_range(flush_addr, flush_addr+bytes);
+	/*
+	 * The last call may have returned before the caches
+	 * were actually flushed, so we call it again to make
+	 * sure.
+	 */
+	flush_icache_range(flush_addr, flush_addr+bytes);
+	mb();
+}
+EXPORT_SYMBOL(sn_flush_all_caches);
diff --git a/arch/ia64/sn/kernel/sn2/io.c b/arch/ia64/sn/kernel/sn2/io.c
new file mode 100644
index 0000000..a12c058
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/io.c
@@ -0,0 +1,101 @@
+/* 
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ *
+ * The generic kernel requires function pointers to these routines, so
+ * we wrap the inlines from asm/sn/io.h here.
+ */
+
+#include <asm/sn/io.h>
+
+#ifdef CONFIG_IA64_GENERIC
+
+#undef __sn_inb
+#undef __sn_inw
+#undef __sn_inl
+#undef __sn_outb
+#undef __sn_outw
+#undef __sn_outl
+#undef __sn_readb
+#undef __sn_readw
+#undef __sn_readl
+#undef __sn_readq
+#undef __sn_readb_relaxed
+#undef __sn_readw_relaxed
+#undef __sn_readl_relaxed
+#undef __sn_readq_relaxed
+
+unsigned int __sn_inb(unsigned long port)
+{
+	return ___sn_inb(port);
+}
+
+unsigned int __sn_inw(unsigned long port)
+{
+	return ___sn_inw(port);
+}
+
+unsigned int __sn_inl(unsigned long port)
+{
+	return ___sn_inl(port);
+}
+
+void __sn_outb(unsigned char val, unsigned long port)
+{
+	___sn_outb(val, port);
+}
+
+void __sn_outw(unsigned short val, unsigned long port)
+{
+	___sn_outw(val, port);
+}
+
+void __sn_outl(unsigned int val, unsigned long port)
+{
+	___sn_outl(val, port);
+}
+
+unsigned char __sn_readb(void __iomem *addr)
+{
+	return ___sn_readb(addr);
+}
+
+unsigned short __sn_readw(void __iomem *addr)
+{
+	return ___sn_readw(addr);
+}
+
+unsigned int __sn_readl(void __iomem *addr)
+{
+	return ___sn_readl(addr);
+}
+
+unsigned long __sn_readq(void __iomem *addr)
+{
+	return ___sn_readq(addr);
+}
+
+unsigned char __sn_readb_relaxed(void __iomem *addr)
+{
+	return ___sn_readb_relaxed(addr);
+}
+
+unsigned short __sn_readw_relaxed(void __iomem *addr)
+{
+	return ___sn_readw_relaxed(addr);
+}
+
+unsigned int __sn_readl_relaxed(void __iomem *addr)
+{
+	return ___sn_readl_relaxed(addr);
+}
+
+unsigned long __sn_readq_relaxed(void __iomem *addr)
+{
+	return ___sn_readq_relaxed(addr);
+}
+
+#endif
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
new file mode 100644
index 0000000..81c63b2
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
@@ -0,0 +1,279 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * Module to export the system's Firmware Interface Tables, including
+ * PROM revision numbers and banners, in /proc
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/nodemask.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/addrs.h>
+
+MODULE_DESCRIPTION("PROM version reporting for /proc");
+MODULE_AUTHOR("Chad Talbott");
+MODULE_LICENSE("GPL");
+
+/* Standard Intel FIT entry types */
+#define FIT_ENTRY_FIT_HEADER	0x00	/* FIT header entry */
+#define FIT_ENTRY_PAL_B		0x01	/* PAL_B entry */
+/* Entries 0x02 through 0x0D reserved by Intel */
+#define FIT_ENTRY_PAL_A_PROC	0x0E	/* Processor-specific PAL_A entry */
+#define FIT_ENTRY_PAL_A		0x0F	/* PAL_A entry, same as... */
+#define FIT_ENTRY_PAL_A_GEN	0x0F	/* ...Generic PAL_A entry */
+#define FIT_ENTRY_UNUSED	0x7F	/* Unused (reserved by Intel?) */
+/* OEM-defined entries range from 0x10 to 0x7E. */
+#define FIT_ENTRY_SAL_A		0x10	/* SAL_A entry */
+#define FIT_ENTRY_SAL_B		0x11	/* SAL_B entry */
+#define FIT_ENTRY_SALRUNTIME	0x12	/* SAL runtime entry */
+#define FIT_ENTRY_EFI		0x1F	/* EFI entry */
+#define FIT_ENTRY_FPSWA		0x20	/* embedded fpswa entry */
+#define FIT_ENTRY_VMLINUX	0x21	/* embedded vmlinux entry */
+
+#define FIT_MAJOR_SHIFT	(32 + 8)
+#define FIT_MAJOR_MASK	((1 << 8) - 1)
+#define FIT_MINOR_SHIFT	32
+#define FIT_MINOR_MASK	((1 << 8) - 1)
+
+#define FIT_MAJOR(q)	\
+	((unsigned) ((q) >> FIT_MAJOR_SHIFT) & FIT_MAJOR_MASK)
+#define FIT_MINOR(q)	\
+	((unsigned) ((q) >> FIT_MINOR_SHIFT) & FIT_MINOR_MASK)
+
+#define FIT_TYPE_SHIFT	(32 + 16)
+#define FIT_TYPE_MASK	((1 << 7) - 1)
+
+#define FIT_TYPE(q)	\
+	((unsigned) ((q) >> FIT_TYPE_SHIFT) & FIT_TYPE_MASK)
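+
+/*
+ * Worked example (illustrative value only): the 64-bit FIT word
+ * 0x0010010200000040 decodes as FIT_TYPE 0x10 (SAL_A), FIT_MAJOR 0x01,
+ * FIT_MINOR 0x02 (i.e. SAL_A revision 1.02), with a size field of
+ * 0x40 units, or 0x40 * 16 = 1024 bytes (see dump_fit_entry() below).
+ */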
+
+struct fit_type_map_t {
+	unsigned char type;
+	const char *name;
+};
+
+static const struct fit_type_map_t fit_entry_types[] = {
+	{FIT_ENTRY_FIT_HEADER, "FIT Header"},
+	{FIT_ENTRY_PAL_A_GEN, "Generic PAL_A"},
+	{FIT_ENTRY_PAL_A_PROC, "Processor-specific PAL_A"},
+	{FIT_ENTRY_PAL_A, "PAL_A"},
+	{FIT_ENTRY_PAL_B, "PAL_B"},
+	{FIT_ENTRY_SAL_A, "SAL_A"},
+	{FIT_ENTRY_SAL_B, "SAL_B"},
+	{FIT_ENTRY_SALRUNTIME, "SAL runtime"},
+	{FIT_ENTRY_EFI, "EFI"},
+	{FIT_ENTRY_VMLINUX, "Embedded Linux"},
+	{FIT_ENTRY_FPSWA, "Embedded FPSWA"},
+	{FIT_ENTRY_UNUSED, "Unused"},
+	{0xff, "Error"},
+};
+
+static const char *fit_type_name(unsigned char type)
+{
+	struct fit_type_map_t const *mapp;
+
+	for (mapp = fit_entry_types; mapp->type != 0xff; mapp++)
+		if (type == mapp->type)
+			return mapp->name;
+
+	if ((type > FIT_ENTRY_PAL_A) && (type < FIT_ENTRY_UNUSED))
+		return "OEM type";
+	if ((type > FIT_ENTRY_PAL_B) && (type < FIT_ENTRY_PAL_A))
+		return "Reserved";
+
+	return "Unknown type";
+}
+
+static int
+get_fit_entry(unsigned long nasid, int index, unsigned long *fentry,
+	      char *banner, int banlen)
+{
+	return ia64_sn_get_fit_compt(nasid, index, fentry, banner, banlen);
+}
+
+
+/*
+ * These two routines display the FIT table for each node.
+ */
+static int dump_fit_entry(char *page, unsigned long *fentry)
+{
+	unsigned type;
+
+	type = FIT_TYPE(fentry[1]);
+	return sprintf(page, "%02x %-25s %x.%02x %016lx %u\n",
+		       type,
+		       fit_type_name(type),
+		       FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1]),
+		       fentry[0],
+		       /* mult by sixteen to get size in bytes */
+		       (unsigned)(fentry[1] & 0xffffff) * 16);
+}
+
+
+/*
+ * We assume that the fit table will be small enough that we can print
+ * the whole thing into one page.  (This is true for our default 16kB
+ * pages -- each entry is about 60 chars wide when printed.)  I read
+ * somewhere that the maximum size of the FIT is 128 entries, so we're
+ * OK except for 4kB pages (and no one is going to do that on SN
+ * anyway).
+ */
+static int
+dump_fit(char *page, unsigned long nasid)
+{
+	unsigned long fentry[2];
+	int index;
+	char *p;
+
+	p = page;
+	for (index=0;;index++) {
+		BUG_ON(index * 60 > PAGE_SIZE);
+		if (get_fit_entry(nasid, index, fentry, NULL, 0))
+			break;
+		p += dump_fit_entry(p, fentry);
+	}
+
+	return p - page;
+}
+
+static int
+dump_version(char *page, unsigned long nasid)
+{
+	unsigned long fentry[2];
+	char banner[128];
+	int index;
+	int len;
+
+	for (index = 0; ; index++) {
+		if (get_fit_entry(nasid, index, fentry, banner,
+				  sizeof(banner)))
+			return 0;
+		if (FIT_TYPE(fentry[1]) == FIT_ENTRY_SAL_A)
+			break;
+	}
+
+	len = sprintf(page, "%x.%02x\n", FIT_MAJOR(fentry[1]),
+		      FIT_MINOR(fentry[1]));
+	page += len;
+
+	if (banner[0])
+		len += snprintf(page, PAGE_SIZE-len, "%s\n", banner);
+
+	return len;
+}
+
+/* same as in proc_misc.c */
+static int
+proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof,
+		  int len)
+{
+	if (len <= off + count)
+		*eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len > count)
+		len = count;
+	if (len < 0)
+		len = 0;
+	return len;
+}
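+
+/*
+ * Worked example (illustrative numbers): for a 100-byte result read in
+ * 50-byte chunks, the first call (off = 0) returns 50 with *start =
+ * page; the second call (off = 50) returns the remaining 50 bytes and
+ * sets *eof, since 100 <= 50 + 50.
+ */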
+
+static int
+read_version_entry(char *page, char **start, off_t off, int count, int *eof,
+		   void *data)
+{
+	int len = 0;
+
+	/* data holds the NASID of the node */
+	len = dump_version(page, (unsigned long)data);
+	len = proc_calc_metrics(page, start, off, count, eof, len);
+	return len;
+}
+
+static int
+read_fit_entry(char *page, char **start, off_t off, int count, int *eof,
+	       void *data)
+{
+	int len = 0;
+
+	/* data holds the NASID of the node */
+	len = dump_fit(page, (unsigned long)data);
+	len = proc_calc_metrics(page, start, off, count, eof, len);
+
+	return len;
+}
+
+/* module entry points */
+int __init prominfo_init(void);
+void __exit prominfo_exit(void);
+
+module_init(prominfo_init);
+module_exit(prominfo_exit);
+
+static struct proc_dir_entry **proc_entries;
+static struct proc_dir_entry *sgi_prominfo_entry;
+
+#define NODE_NAME_LEN 11
+
+int __init prominfo_init(void)
+{
+	struct proc_dir_entry **entp;
+	struct proc_dir_entry *p;
+	cnodeid_t cnodeid;
+	unsigned long nasid;
+	char name[NODE_NAME_LEN];
+
+	if (!ia64_platform_is("sn2"))
+		return 0;
+
+	proc_entries = kmalloc(num_online_nodes() * sizeof(struct proc_dir_entry *),
+			       GFP_KERNEL);
+
+	sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);
+
+	entp = proc_entries;
+	for_each_online_node(cnodeid) {
+		sprintf(name, "node%d", cnodeid);
+		*entp = proc_mkdir(name, sgi_prominfo_entry);
+		nasid = cnodeid_to_nasid(cnodeid);
+		p = create_proc_read_entry(
+			"fit", 0, *entp, read_fit_entry,
+			(void *)nasid);
+		if (p)
+			p->owner = THIS_MODULE;
+		p = create_proc_read_entry(
+			"version", 0, *entp, read_version_entry,
+			(void *)nasid);
+		if (p)
+			p->owner = THIS_MODULE;
+		entp++;
+	}
+
+	return 0;
+}
+
+void __exit prominfo_exit(void)
+{
+	struct proc_dir_entry **entp;
+	unsigned cnodeid;
+	char name[NODE_NAME_LEN];
+
+	entp = proc_entries;
+	for_each_online_node(cnodeid) {
+		remove_proc_entry("fit", *entp);
+		remove_proc_entry("version", *entp);
+		sprintf(name, "node%d", cnodeid);
+		remove_proc_entry(name, sgi_prominfo_entry);
+		entp++;
+	}
+	remove_proc_entry("sgi_prominfo", NULL);
+	kfree(proc_entries);
+}
diff --git a/arch/ia64/sn/kernel/sn2/ptc_deadlock.S b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
new file mode 100644
index 0000000..7947312
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
@@ -0,0 +1,82 @@
+/* 
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <asm/sn/shub_mmr.h>
+
+#define DEADLOCKBIT	SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
+#define WRITECOUNTMASK	SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK
+#define ALIAS_OFFSET	(SH1_PIO_WRITE_STATUS_0_ALIAS-SH1_PIO_WRITE_STATUS_0)
+
+
+	.global	sn2_ptc_deadlock_recovery_core
+	.proc  	sn2_ptc_deadlock_recovery_core
+
+sn2_ptc_deadlock_recovery_core:
+	.regstk 6,0,0,0
+
+	ptc0  	 = in0
+	data0 	 = in1
+	ptc1  	 = in2
+	data1 	 = in3
+	piowc 	 = in4
+	zeroval  = in5
+	piowcphy = r30
+	psrsave  = r2
+	scr1	 = r16
+	scr2	 = r17
+	mask	 = r18
+
+
+	extr.u	piowcphy=piowc,0,61;;	// Convert piowc to uncached physical address
+	dep	piowcphy=-1,piowcphy,63,1
+	movl	mask=WRITECOUNTMASK
+
+1:
+	add	scr2=ALIAS_OFFSET,piowc	// Address of WRITE_STATUS alias register 
+	mov	scr1=7;;		// Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR
+	st8.rel	[scr2]=scr1;;
+
+5:	ld8.acq	scr1=[piowc];;		// Wait for PIOs to complete.
+	and	scr2=scr1,mask;;	// mask of writecount bits
+	cmp.ne	p6,p0=zeroval,scr2
+(p6)	br.cond.sptk 5b
+	
+
+
+	////////////// BEGIN PHYSICAL MODE ////////////////////
+	mov psrsave=psr			// Disable IC (no PMIs)
+	rsm psr.i | psr.dt | psr.ic;;
+	srlz.i;;
+
+	st8.rel [ptc0]=data0		// Write PTC0 & wait for completion.
+
+5:	ld8.acq	scr1=[piowcphy];;	// Wait for PIOs to complete.
+	and	scr2=scr1,mask;;	// mask of writecount bits
+	cmp.ne	p6,p0=zeroval,scr2
+(p6)	br.cond.sptk 5b;;
+
+	tbit.nz	p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK
+(p7)	cmp.ne p7,p0=r0,ptc1;;		// Test for non-null ptc1
+	
+(p7)	st8.rel [ptc1]=data1;;		// Now write PTC1.
+
+5:	ld8.acq	scr1=[piowcphy];;	// Wait for PIOs to complete.
+	and	scr2=scr1,mask;;	// mask of writecount bits
+	cmp.ne	p6,p0=zeroval,scr2
+(p6)	br.cond.sptk 5b
+	
+	tbit.nz	p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK
+
+	mov psr.l=psrsave;;		// Reenable IC
+	srlz.i;;
+	////////////// END   PHYSICAL MODE ////////////////////
+
+(p8)	br.cond.spnt 1b;;		// Repeat if DEADLOCK occurred.
+
+	br.ret.sptk	rp
+	.endp sn2_ptc_deadlock_recovery_core
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
new file mode 100644
index 0000000..7af05a7
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -0,0 +1,295 @@
+/*
+ * SN2 Platform specific SMP Support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/nodemask.h>
+
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/sal.h>
+#include <asm/system.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/tlb.h>
+#include <asm/numa.h>
+#include <asm/hw_irq.h>
+#include <asm/current.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/rw_mmr.h>
+
+void sn2_ptc_deadlock_recovery(volatile unsigned long *, unsigned long data0, 
+	volatile unsigned long *, unsigned long data1);
+
+static  __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
+
+static unsigned long sn2_ptc_deadlock_count;
+
+static inline unsigned long wait_piowc(void)
+{
+	volatile unsigned long *piows, zeroval;
+	unsigned long ws;
+
+	piows = pda->pio_write_status_addr;
+	zeroval = pda->pio_write_status_val;
+	do {
+		cpu_relax();
+	} while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
+	return ws;
+}
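+
+/*
+ * Note: the PENDING_WRITE_COUNT field counts PIO writes still in
+ * flight; zeroval is the value the field assumes once the pipe has
+ * drained (set up per-cpu in sn_cpu_init(): the full field mask on
+ * shub1, zero on shub2).
+ */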
+
+void sn_tlb_migrate_finish(struct mm_struct *mm)
+{
+	if (mm == current->mm)
+		flush_tlb_mm(mm);
+}
+
+/**
+ * sn2_global_tlb_purge - globally purge translation cache of virtual address range
+ * @start: start of virtual address range
+ * @end: end of virtual address range
+ * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
+ *
+ * Purges the translation caches of all processors of the given virtual address
+ * range.
+ *
+ * Note:
+ * 	- cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
+ * 	- cpu_vm_mask is converted into a nodemask of the nodes containing the
+ * 	  cpus in cpu_vm_mask.
+ *	- if only one bit is set in cpu_vm_mask & it is the current cpu,
+ *	  then only the local TLB needs to be flushed. This flushing can be done
+ *	  using ptc.l. This is the common case & avoids the global spinlock.
+ *	- if multiple cpus have loaded the context, then flushing has to be
+ *	  done with ptc.g/MMRs under protection of the global ptc_lock.
+ */
+
+void
+sn2_global_tlb_purge(unsigned long start, unsigned long end,
+		     unsigned long nbits)
+{
+	int i, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
+	volatile unsigned long *ptc0, *ptc1;
+	unsigned long flags = 0, data0 = 0, data1 = 0;
+	struct mm_struct *mm = current->active_mm;
+	short nasids[MAX_NUMNODES], nix;
+	nodemask_t nodes_flushed;
+
+	nodes_clear(nodes_flushed);
+	i = 0;
+
+	for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
+		cnode = cpu_to_node(cpu);
+		node_set(cnode, nodes_flushed);
+		lcpu = cpu;
+		i++;
+	}
+
+	preempt_disable();
+
+	if (likely(i == 1 && lcpu == smp_processor_id())) {
+		do {
+			ia64_ptcl(start, nbits << 2);
+			start += (1UL << nbits);
+		} while (start < end);
+		ia64_srlz_i();
+		preempt_enable();
+		return;
+	}
+
+	if (atomic_read(&mm->mm_users) == 1) {
+		flush_tlb_mm(mm);
+		preempt_enable();
+		return;
+	}
+
+	nix = 0;
+	for_each_node_mask(cnode, nodes_flushed)
+		nasids[nix++] = cnodeid_to_nasid(cnode);
+
+	shub1 = is_shub1();
+	if (shub1) {
+		data0 = (1UL << SH1_PTC_0_A_SHFT) |
+		    	(nbits << SH1_PTC_0_PS_SHFT) |
+		    	((ia64_get_rr(start) >> 8) << SH1_PTC_0_RID_SHFT) |
+		    	(1UL << SH1_PTC_0_START_SHFT);
+		ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
+		ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
+	} else {
+		data0 = (1UL << SH2_PTC_A_SHFT) |
+			(nbits << SH2_PTC_PS_SHFT) |
+		    	(1UL << SH2_PTC_START_SHFT);
+		ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC + 
+			((ia64_get_rr(start) >> 8) << SH2_PTC_RID_SHFT) );
+		ptc1 = NULL;
+	}
+	
+
+	mynasid = get_nasid();
+
+	spin_lock_irqsave(&sn2_global_ptc_lock, flags);
+
+	do {
+		if (shub1)
+			data1 = start | (1UL << SH1_PTC_1_START_SHFT);
+		else
+			data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
+		for (i = 0; i < nix; i++) {
+			nasid = nasids[i];
+			if (unlikely(nasid == mynasid)) {
+				ia64_ptcga(start, nbits << 2);
+				ia64_srlz_i();
+			} else {
+				ptc0 = CHANGE_NASID(nasid, ptc0);
+				if (ptc1)
+					ptc1 = CHANGE_NASID(nasid, ptc1);
+				pio_atomic_phys_write_mmrs(ptc0, data0, ptc1,
+							   data1);
+				flushed = 1;
+			}
+		}
+
+		if (flushed
+		    && (wait_piowc() &
+			SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK)) {
+			sn2_ptc_deadlock_recovery(ptc0, data0, ptc1, data1);
+		}
+
+		start += (1UL << nbits);
+
+	} while (start < end);
+
+	spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+
+	preempt_enable();
+}
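+
+/*
+ * Worked example (illustrative): with nbits = 14 (16kB pages) and a
+ * 64kB range, the loop above issues four purge transactions, advancing
+ * start by 1UL << 14 on each iteration until start reaches end.
+ */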
+
+/*
+ * sn2_ptc_deadlock_recovery
+ *
+ * Recover from PTC deadlock conditions.  Recovery requires stepping through
+ * each TLB flush transaction.  The recovery sequence is somewhat tricky & is
+ * coded in assembly language.
+ */
+void sn2_ptc_deadlock_recovery(volatile unsigned long *ptc0, unsigned long data0,
+	volatile unsigned long *ptc1, unsigned long data1)
+{
+	extern void sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
+	        volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long);
+	int cnode, mycnode, nasid;
+	volatile unsigned long *piows;
+	volatile unsigned long zeroval;
+
+	sn2_ptc_deadlock_count++;
+
+	piows = pda->pio_write_status_addr;
+	zeroval = pda->pio_write_status_val;
+
+	mycnode = numa_node_id();
+
+	for_each_online_node(cnode) {
+		if (is_headless_node(cnode) || cnode == mycnode)
+			continue;
+		nasid = cnodeid_to_nasid(cnode);
+		ptc0 = CHANGE_NASID(nasid, ptc0);
+		if (ptc1)
+			ptc1 = CHANGE_NASID(nasid, ptc1);
+		sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
+	}
+}
+
+/**
+ * sn_send_IPI_phys - send an IPI to a Nasid and slice
+ * @nasid: nasid to receive the interrupt (may be outside partition)
+ * @physid: physical cpuid to receive the interrupt.
+ * @vector: command to send
+ * @delivery_mode: delivery mechanism
+ *
+ * Sends an IPI (interprocessor interrupt) to the processor specified by
+ * @physid
+ *
+ * @delivery_mode can be one of the following
+ *
+ * %IA64_IPI_DM_INT - pend an interrupt
+ * %IA64_IPI_DM_PMI - pend a PMI
+ * %IA64_IPI_DM_NMI - pend an NMI
+ * %IA64_IPI_DM_INIT - pend an INIT interrupt
+ */
+void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode)
+{
+	long val;
+	unsigned long flags = 0;
+	volatile long *p;
+
+	p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
+	val = (1UL << SH_IPI_INT_SEND_SHFT) |
+	    (physid << SH_IPI_INT_PID_SHFT) |
+	    ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
+	    ((long)vector << SH_IPI_INT_IDX_SHFT) |
+	    (0x000feeUL << SH_IPI_INT_BASE_SHFT);
+
+	mb();
+	if (enable_shub_wars_1_1()) {
+		spin_lock_irqsave(&sn2_global_ptc_lock, flags);
+	}
+	pio_phys_write_mmr(p, val);
+	if (enable_shub_wars_1_1()) {
+		wait_piowc();
+		spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+	}
+
+}
+
+EXPORT_SYMBOL(sn_send_IPI_phys);
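+
+/*
+ * Worked example (illustrative values): to pend plain interrupt vector
+ * 0xff at physical cpu id 3, val packs up as
+ *
+ *	(1UL << SH_IPI_INT_SEND_SHFT) | (3UL << SH_IPI_INT_PID_SHFT) |
+ *	(IA64_IPI_DM_INT << SH_IPI_INT_TYPE_SHFT) |
+ *	(0xffUL << SH_IPI_INT_IDX_SHFT) |
+ *	(0x000feeUL << SH_IPI_INT_BASE_SHFT)
+ *
+ * before being written to the target node's SH_IPI_INT MMR.
+ */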
+
+/**
+ * sn2_send_IPI - send an IPI to a processor
+ * @cpuid: target of the IPI
+ * @vector: command to send
+ * @delivery_mode: delivery mechanism
+ * @redirect: redirect the IPI?
+ *
+ * Sends an IPI (InterProcessor Interrupt) to the processor specified by
+ * @cpuid.  @vector specifies the command to send, while @delivery_mode can 
+ * be one of the following
+ *
+ * %IA64_IPI_DM_INT - pend an interrupt
+ * %IA64_IPI_DM_PMI - pend a PMI
+ * %IA64_IPI_DM_NMI - pend an NMI
+ * %IA64_IPI_DM_INIT - pend an INIT interrupt
+ */
+void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
+{
+	long physid;
+	int nasid;
+
+	physid = cpu_physical_id(cpuid);
+	nasid = cpuid_to_nasid(cpuid);
+
+	/* the following is used only when starting cpus at boot time */
+	if (unlikely(nasid == -1))
+		ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
+
+	sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
+}
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
new file mode 100644
index 0000000..1973564
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -0,0 +1,690 @@
+/* 
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
+ *
+ * SGI Altix topology and hardware performance monitoring API.
+ * Mark Goodwin <markgw@sgi.com>. 
+ *
+ * Creates /proc/sgi_sn/sn_topology (read-only) to export
+ * info about Altix nodes, routers, CPUs and NumaLink
+ * interconnection/topology.
+ *
+ * Also creates a dynamic misc device named "sn_hwperf"
+ * that supports an ioctl interface to call down into SAL
+ * to discover hw objects, topology and to read/write
+ * memory mapped registers, e.g. for performance monitoring.
+ * The "sn_hwperf" device is registered only after the procfs
+ * file is first opened, i.e. only if/when it's needed. 
+ *
+ * This API is used by SGI Performance Co-Pilot and other
+ * tools, see http://oss.sgi.com/projects/pcp
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/cpumask.h>
+#include <linux/smp_lock.h>
+#include <linux/nodemask.h>
+#include <asm/processor.h>
+#include <asm/topology.h>
+#include <asm/smp.h>
+#include <asm/semaphore.h>
+#include <asm/segment.h>
+#include <asm/uaccess.h>
+#include <asm/sal.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/module.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/sn2/sn_hwperf.h>
+
+static void *sn_hwperf_salheap = NULL;
+static int sn_hwperf_obj_cnt = 0;
+static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
+static int sn_hwperf_init(void);
+static DECLARE_MUTEX(sn_hwperf_init_mutex);
+
+static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
+{
+	int e;
+	u64 sz;
+	struct sn_hwperf_object_info *objbuf = NULL;
+
+	if ((e = sn_hwperf_init()) < 0) {
+		printk("sn_hwperf_init failed: err %d\n", e);
+		goto out;
+	}
+
+	sz = sn_hwperf_obj_cnt * sizeof(struct sn_hwperf_object_info);
+	if ((objbuf = (struct sn_hwperf_object_info *) vmalloc(sz)) == NULL) {
+		printk("sn_hwperf_enum_objects: vmalloc(%d) failed\n", (int)sz);
+		e = -ENOMEM;
+		goto out;
+	}
+
+	e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_OBJECTS,
+		0, sz, (u64) objbuf, 0, 0, NULL);
+	if (e != SN_HWPERF_OP_OK) {
+		e = -EINVAL;
+		vfree(objbuf);
+	}
+
+out:
+	*nobj = sn_hwperf_obj_cnt;
+	*ret = objbuf;
+	return e;
+}
+
+static int sn_hwperf_geoid_to_cnode(char *location)
+{
+	int cnode;
+	geoid_t geoid;
+	moduleid_t module_id;
+	char type;
+	int rack, slot, slab;
+	int this_rack, this_slot, this_slab;
+
+	if (sscanf(location, "%03d%c%02d#%d", &rack, &type, &slot, &slab) != 4)
+		return -1;
+
+	for (cnode = 0; cnode < numionodes; cnode++) {
+		geoid = cnodeid_get_geoid(cnode);
+		module_id = geo_module(geoid);
+		this_rack = MODULE_GET_RACK(module_id);
+		this_slot = MODULE_GET_BPOS(module_id);
+		this_slab = geo_slab(geoid);
+		if (rack == this_rack && slot == this_slot && slab == this_slab)
+			break;
+	}
+
+	return cnode < numionodes ? cnode : -1;
+}
+
+static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj)
+{
+	if (!obj->sn_hwp_this_part)
+		return -1;
+	return sn_hwperf_geoid_to_cnode(obj->location);
+}
+
+static int sn_hwperf_generic_ordinal(struct sn_hwperf_object_info *obj,
+				struct sn_hwperf_object_info *objs)
+{
+	int ordinal;
+	struct sn_hwperf_object_info *p;
+
+	for (ordinal=0, p=objs; p != obj; p++) {
+		if (SN_HWPERF_FOREIGN(p))
+			continue;
+		if (SN_HWPERF_SAME_OBJTYPE(p, obj))
+			ordinal++;
+	}
+
+	return ordinal;
+}
+
+static const char *slabname_node =	"node"; /* SHub asic */
+static const char *slabname_ionode =	"ionode"; /* TIO asic */
+static const char *slabname_router =	"router"; /* NL3R or NL4R */
+static const char *slabname_other =	"other"; /* unknown asic */
+
+static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj,
+			struct sn_hwperf_object_info *objs, int *ordinal)
+{
+	int isnode;
+	const char *slabname = slabname_other;
+
+	if ((isnode = SN_HWPERF_IS_NODE(obj)) || SN_HWPERF_IS_IONODE(obj)) {
+	    	slabname = isnode ? slabname_node : slabname_ionode;
+		*ordinal = sn_hwperf_obj_to_cnode(obj);
+	}
+	else {
+		*ordinal = sn_hwperf_generic_ordinal(obj, objs);
+		if (SN_HWPERF_IS_ROUTER(obj))
+			slabname = slabname_router;
+	}
+
+	return slabname;
+}
+
+static int sn_topology_show(struct seq_file *s, void *d)
+{
+	int sz;
+	int pt;
+	int e;
+	int i;
+	int j;
+	const char *slabname;
+	int ordinal;
+	cpumask_t cpumask;
+	char slice;
+	struct cpuinfo_ia64 *c;
+	struct sn_hwperf_port_info *ptdata;
+	struct sn_hwperf_object_info *p;
+	struct sn_hwperf_object_info *obj = d;	/* this object */
+	struct sn_hwperf_object_info *objs = s->private; /* all objects */
+
+	if (obj == objs) {
+		seq_printf(s, "# sn_topology version 1\n");
+		seq_printf(s, "# objtype ordinal location partition"
+			" [attribute value [, ...]]\n");
+	}
+
+	if (SN_HWPERF_FOREIGN(obj)) {
+		/* private in another partition: not interesting */
+		return 0;
+	}
+
+	for (i = 0; obj->name[i]; i++) {
+		if (obj->name[i] == ' ')
+			obj->name[i] = '_';
+	}
+
+	slabname = sn_hwperf_get_slabname(obj, objs, &ordinal);
+	seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
+		obj->sn_hwp_this_part ? "local" : "shared", obj->name);
+
+	if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
+		seq_putc(s, '\n');
+	else {
+		seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal));
+		for (i=0; i < numionodes; i++) {
+			seq_printf(s, i ? ":%d" : ", dist %d",
+				node_distance(ordinal, i));
+		}
+		seq_putc(s, '\n');
+
+		/*
+		 * CPUs on this node, if any
+		 */
+		cpumask = node_to_cpumask(ordinal);
+		for_each_online_cpu(i) {
+			if (cpu_isset(i, cpumask)) {
+				slice = 'a' + cpuid_to_slice(i);
+				c = cpu_data(i);
+				seq_printf(s, "cpu %d %s%c local"
+					" freq %luMHz, arch ia64",
+					i, obj->location, slice,
+					c->proc_freq / 1000000);
+				for_each_online_cpu(j) {
+					seq_printf(s, j ? ":%d" : ", dist %d",
+						node_distance(
+						    cpuid_to_cnodeid(i),
+						    cpuid_to_cnodeid(j)));
+				}
+				seq_putc(s, '\n');
+			}
+		}
+	}
+
+	if (obj->ports) {
+		/*
+		 * numalink ports
+		 */
+		sz = obj->ports * sizeof(struct sn_hwperf_port_info);
+		if ((ptdata = vmalloc(sz)) == NULL)
+			return -ENOMEM;
+		e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+				      SN_HWPERF_ENUM_PORTS, obj->id, sz,
+				      (u64) ptdata, 0, 0, NULL);
+		if (e != SN_HWPERF_OP_OK)
+			return -EINVAL;
+		for (ordinal=0, p=objs; p != obj; p++) {
+			if (!SN_HWPERF_FOREIGN(p))
+				ordinal += p->ports;
+		}
+		for (pt = 0; pt < obj->ports; pt++) {
+			for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) {
+				if (ptdata[pt].conn_id == p->id) {
+					break;
+				}
+			}
+			seq_printf(s, "numalink %d %s-%d",
+			    ordinal+pt, obj->location, ptdata[pt].port);
+
+			if (i >= sn_hwperf_obj_cnt) {
+				/* no connection */
+				seq_puts(s, " local endpoint disconnected"
+					    ", protocol unknown\n");
+				continue;
+			}
+
+			if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
+				/* both ends local to this partition */
+				seq_puts(s, " local");
+			else if (!obj->sn_hwp_this_part && !p->sn_hwp_this_part)
+				/* both ends of the link in a foreign partition */
+				seq_puts(s, " foreign");
+			else
+				/* link straddles a partition */
+				seq_puts(s, " shared");
+
+			/*
+			 * Unlikely, but strictly should query the LLP config
+			 * registers because an NL4R can be configured to run
+			 * NL3 protocol, even when not talking to an NL3 router.
+			 * Ditto for node-node.
+			 */
+			seq_printf(s, " endpoint %s-%d, protocol %s\n",
+				p->location, ptdata[pt].conn_port,
+				(SN_HWPERF_IS_NL3ROUTER(obj) ||
+				SN_HWPERF_IS_NL3ROUTER(p)) ?  "LLP3" : "LLP4");
+		}
+		vfree(ptdata);
+	}
+
+	return 0;
+}
+
+static void *sn_topology_start(struct seq_file *s, loff_t * pos)
+{
+	struct sn_hwperf_object_info *objs = s->private;
+
+	if (*pos < sn_hwperf_obj_cnt)
+		return (void *)(objs + *pos);
+
+	return NULL;
+}
+
+static void *sn_topology_next(struct seq_file *s, void *v, loff_t * pos)
+{
+	++*pos;
+	return sn_topology_start(s, pos);
+}
+
+static void sn_topology_stop(struct seq_file *m, void *v)
+{
+	return;
+}
+
+/*
+ * /proc/sgi_sn/sn_topology, read-only using seq_file
+ */
+static struct seq_operations sn_topology_seq_ops = {
+	.start = sn_topology_start,
+	.next = sn_topology_next,
+	.stop = sn_topology_stop,
+	.show = sn_topology_show
+};
+
+struct sn_hwperf_op_info {
+	u64 op;
+	struct sn_hwperf_ioctl_args *a;
+	void *p;
+	int *v0;
+	int ret;
+};
+
+static void sn_hwperf_call_sal(void *info)
+{
+	struct sn_hwperf_op_info *op_info = info;
+	int r;
+
+	r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op_info->op,
+		      op_info->a->arg, op_info->a->sz,
+		      (u64) op_info->p, 0, 0, op_info->v0);
+	op_info->ret = r;
+}
+
+static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
+{
+	u32 cpu;
+	u32 use_ipi;
+	int r = 0;
+	cpumask_t save_allowed;
+	
+	cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32;
+	use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK;
+	op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;
+
+	if (cpu != SN_HWPERF_ARG_ANY_CPU) {
+		if (cpu >= num_online_cpus() || !cpu_online(cpu)) {
+			r = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (cpu == SN_HWPERF_ARG_ANY_CPU || cpu == get_cpu()) {
+		/* don't care, or already on correct cpu */
+		sn_hwperf_call_sal(op_info);
+	}
+	else {
+		if (use_ipi) {
+			/* use an interprocessor interrupt to call SAL */
+			smp_call_function_single(cpu, sn_hwperf_call_sal,
+				op_info, 1, 1);
+		}
+		else {
+			/* migrate the task before calling SAL */ 
+			save_allowed = current->cpus_allowed;
+			set_cpus_allowed(current, cpumask_of_cpu(cpu));
+			sn_hwperf_call_sal(op_info);
+			set_cpus_allowed(current, save_allowed);
+		}
+	}
+	r = op_info->ret;
+
+out:
+	return r;
+}
+
+/* map SAL hwperf error code to system error code */
+static int sn_hwperf_map_err(int hwperf_err)
+{
+	int e;
+
+	switch(hwperf_err) {
+	case SN_HWPERF_OP_OK:
+		e = 0;
+		break;
+
+	case SN_HWPERF_OP_NOMEM:
+		e = -ENOMEM;
+		break;
+
+	case SN_HWPERF_OP_NO_PERM:
+		e = -EPERM;
+		break;
+
+	case SN_HWPERF_OP_IO_ERROR:
+		e = -EIO;
+		break;
+
+	case SN_HWPERF_OP_BUSY:
+	case SN_HWPERF_OP_RECONFIGURE:
+		e = -EAGAIN;
+		break;
+
+	case SN_HWPERF_OP_INVAL:
+	default:
+		e = -EINVAL;
+		break;
+	}
+
+	return e;
+}
+
+/*
+ * ioctl for "sn_hwperf" misc device
+ */
+static int
+sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
+{
+	struct sn_hwperf_ioctl_args a;
+	struct cpuinfo_ia64 *cdata;
+	struct sn_hwperf_object_info *objs;
+	struct sn_hwperf_object_info *cpuobj;
+	struct sn_hwperf_op_info op_info;
+	void *p = NULL;
+	int nobj;
+	char slice;
+	int node;
+	int r;
+	int v0;
+	int i;
+	int j;
+
+	unlock_kernel();
+
+	/* only user requests are allowed here */
+	if ((op & SN_HWPERF_OP_MASK) < 10) {
+		r = -EINVAL;
+		goto error;
+	}
+	r = copy_from_user(&a, (const void __user *)arg,
+		sizeof(struct sn_hwperf_ioctl_args));
+	if (r != 0) {
+		r = -EFAULT;
+		goto error;
+	}
+
+	/*
+	 * Allocate memory to hold a kernel copy of the user buffer. The
+	 * buffer contents are either copied in or out (or both) of user
+	 * space depending on the flags encoded in the requested operation.
+	 */
+	if (a.ptr) {
+		p = vmalloc(a.sz);
+		if (!p) {
+			r = -ENOMEM;
+			goto error;
+		}
+	}
+
+	if (op & SN_HWPERF_OP_MEM_COPYIN) {
+		r = copy_from_user(p, (const void __user *)a.ptr, a.sz);
+		if (r != 0) {
+			r = -EFAULT;
+			goto error;
+		}
+	}
+
+	switch (op) {
+	case SN_HWPERF_GET_CPU_INFO:
+		if (a.sz == sizeof(u64)) {
+			/* special case to get size needed */
+			*(u64 *) p = (u64) num_online_cpus() *
+				sizeof(struct sn_hwperf_object_info);
+		} else
+		if (a.sz < num_online_cpus() * sizeof(struct sn_hwperf_object_info)) {
+			r = -ENOMEM;
+			goto error;
+		} else
+		if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
+			memset(p, 0, a.sz);
+			for (i = 0; i < nobj; i++) {
+				node = sn_hwperf_obj_to_cnode(objs + i);
+				for_each_online_cpu(j) {
+					if (node != cpu_to_node(j))
+						continue;
+					cpuobj = (struct sn_hwperf_object_info *) p + j;
+					slice = 'a' + cpuid_to_slice(j);
+					cdata = cpu_data(j);
+					cpuobj->id = j;
+					snprintf(cpuobj->name,
+						 sizeof(cpuobj->name),
+						 "CPU %luMHz %s",
+						 cdata->proc_freq / 1000000,
+						 cdata->vendor);
+					snprintf(cpuobj->location,
+						 sizeof(cpuobj->location),
+						 "%s%c", objs[i].location,
+						 slice);
+				}
+			}
+
+			vfree(objs);
+		}
+		break;
+
+	case SN_HWPERF_GET_NODE_NASID:
+		if (a.sz != sizeof(u64) ||
+		   (node = a.arg) < 0 || node >= numionodes) {
+			r = -EINVAL;
+			goto error;
+		}
+		*(u64 *)p = (u64)cnodeid_to_nasid(node);
+		break;
+
+	case SN_HWPERF_GET_OBJ_NODE:
+		if (a.sz != sizeof(u64) || a.arg < 0) {
+			r = -EINVAL;
+			goto error;
+		}
+		if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
+			if (a.arg >= nobj) {
+				r = -EINVAL;
+				vfree(objs);
+				goto error;
+			}
+			i = a.arg;
+			if (objs[i].id != a.arg) {
+				for (i = 0; i < nobj; i++) {
+					if (objs[i].id == a.arg)
+						break;
+				}
+			}
+			if (i == nobj) {
+				r = -EINVAL;
+				vfree(objs);
+				goto error;
+			}
+			*(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i);
+			vfree(objs);
+		}
+		break;
+
+	case SN_HWPERF_GET_MMRS:
+	case SN_HWPERF_SET_MMRS:
+	case SN_HWPERF_OBJECT_DISTANCE:
+		op_info.p = p;
+		op_info.a = &a;
+		op_info.v0 = &v0;
+		op_info.op = op;
+		r = sn_hwperf_op_cpu(&op_info);
+		if (r) {
+			r = sn_hwperf_map_err(r);
+			goto error;
+		}
+		break;
+
+	default:
+		/* all other ops are a direct SAL call */
+		r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op,
+			      a.arg, a.sz, (u64) p, 0, 0, &v0);
+		if (r) {
+			r = sn_hwperf_map_err(r);
+			goto error;
+		}
+		a.v0 = v0;
+		break;
+	}
+
+	if (op & SN_HWPERF_OP_MEM_COPYOUT) {
+		r = copy_to_user((void __user *)a.ptr, p, a.sz);
+		if (r != 0) {
+			r = -EFAULT;
+			goto error;
+		}
+	}
+
+error:
+	vfree(p);
+
+	lock_kernel();
+	return r;
+}
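+
+/*
+ * Illustrative usage sketch (not part of the driver); it assumes the op
+ * code encodes SN_HWPERF_OP_MEM_COPYOUT so the result is copied back:
+ *
+ *	struct sn_hwperf_ioctl_args a;
+ *	u64 nasid;
+ *	int fd = open("/dev/sn_hwperf", O_RDONLY);
+ *
+ *	a.arg = 0;			// cnode number to translate
+ *	a.sz = sizeof(u64);		// GET_NODE_NASID requires this
+ *	a.ptr = (u64) &nasid;		// user buffer for the answer
+ *	ioctl(fd, SN_HWPERF_GET_NODE_NASID, (unsigned long) &a);
+ */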
+
+static struct file_operations sn_hwperf_fops = {
+	.ioctl = sn_hwperf_ioctl,
+};
+
+static struct miscdevice sn_hwperf_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "sn_hwperf",
+	.fops = &sn_hwperf_fops
+};
+
+static int sn_hwperf_init(void)
+{
+	u64 v;
+	int salr;
+	int e = 0;
+
+	/* single threaded, once-only initialization */
+	down(&sn_hwperf_init_mutex);
+	if (sn_hwperf_salheap) {
+		up(&sn_hwperf_init_mutex);
+		return e;
+	}
+
+	/*
+	 * The PROM code needs a fixed reference node. For convenience the
+	 * same node as the console I/O is used.
+	 */
+	sn_hwperf_master_nasid = (nasid_t) ia64_sn_get_console_nasid();
+
+	/*
+	 * Request the needed size and install the PROM scratch area.
+	 * The PROM keeps various tracking bits in this memory area.
+	 */
+	salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+				 (u64) SN_HWPERF_GET_HEAPSIZE, 0,
+				 (u64) sizeof(u64), (u64) &v, 0, 0, NULL);
+	if (salr != SN_HWPERF_OP_OK) {
+		e = -EINVAL;
+		goto out;
+	}
+
+	if ((sn_hwperf_salheap = vmalloc(v)) == NULL) {
+		e = -ENOMEM;
+		goto out;
+	}
+	salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+				 SN_HWPERF_INSTALL_HEAP, 0, v,
+				 (u64) sn_hwperf_salheap, 0, 0, NULL);
+	if (salr != SN_HWPERF_OP_OK) {
+		e = -EINVAL;
+		goto out;
+	}
+
+	salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+				 SN_HWPERF_OBJECT_COUNT, 0,
+				 sizeof(u64), (u64) &v, 0, 0, NULL);
+	if (salr != SN_HWPERF_OP_OK) {
+		e = -EINVAL;
+		goto out;
+	}
+	sn_hwperf_obj_cnt = (int)v;
+
+out:
+	if (e < 0 && sn_hwperf_salheap) {
+		vfree(sn_hwperf_salheap);
+		sn_hwperf_salheap = NULL;
+		sn_hwperf_obj_cnt = 0;
+	}
+
+	if (!e) {
+		/*
+		 * Register a dynamic misc device for ioctl. Platforms
+		 * supporting hotplug will create /dev/sn_hwperf; otherwise
+		 * users can look up the minor number in /proc/misc.
+		 */
+		if ((e = misc_register(&sn_hwperf_dev)) != 0) {
+			printk(KERN_ERR "sn_hwperf_init: misc register "
+			       "for \"sn_hwperf\" failed, err %d\n", e);
+		}
+	}
+
+	up(&sn_hwperf_init_mutex);
+	return e;
+}
+
+int sn_topology_open(struct inode *inode, struct file *file)
+{
+	int e;
+	struct seq_file *seq;
+	struct sn_hwperf_object_info *objbuf;
+	int nobj;
+
+	if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) {
+		e = seq_open(file, &sn_topology_seq_ops);
+		if (e) {
+			/* don't leak the object buffer if seq_open fails */
+			vfree(objbuf);
+		} else {
+			seq = file->private_data;
+			seq->private = objbuf;
+		}
+	}
+
+	return e;
+}
+
+int sn_topology_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+
+	vfree(seq->private);
+	return seq_release(inode, file);
+}
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
new file mode 100644
index 0000000..6a80fca
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -0,0 +1,149 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#include <linux/config.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/sn/sn_sal.h>
+
+static int partition_id_show(struct seq_file *s, void *p)
+{
+	seq_printf(s, "%d\n", sn_local_partid());
+	return 0;
+}
+
+static int partition_id_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, partition_id_show, NULL);
+}
+
+static int system_serial_number_show(struct seq_file *s, void *p)
+{
+	seq_printf(s, "%s\n", sn_system_serial_number());
+	return 0;
+}
+
+static int system_serial_number_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, system_serial_number_show, NULL);
+}
+
+static int licenseID_show(struct seq_file *s, void *p)
+{
+	seq_printf(s, "0x%lx\n", sn_partition_serial_number_val());
+	return 0;
+}
+
+static int licenseID_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, licenseID_show, NULL);
+}
+
+/*
+ * Enable forced interrupt by default.
+ * When set, the sn interrupt handler writes the force interrupt register on
+ * the bridge chip.  The hardware will then send an interrupt message if the
+ * interrupt line is active.  This mimics a level sensitive interrupt.
+ */
+int sn_force_interrupt_flag = 1;
+
+static int sn_force_interrupt_show(struct seq_file *s, void *p)
+{
+	seq_printf(s, "Force interrupt is %s\n",
+		sn_force_interrupt_flag ? "enabled" : "disabled");
+	return 0;
+}
+
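+/*
+ * Only the first byte written is examined: '0' disables the forced
+ * interrupt (e.g. "echo 0 > /proc/sgi_sn/sn_force_interrupt"), any
+ * other leading byte enables it.
+ */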
+static ssize_t sn_force_interrupt_write_proc(struct file *file,
+		const char __user *buffer, size_t count, loff_t *data)
+{
+	char val;
+
+	if (count < 1)
+		return -EINVAL;
+
+	if (copy_from_user(&val, buffer, 1))
+		return -EFAULT;
+
+	sn_force_interrupt_flag = (val == '0') ? 0 : 1;
+	return count;
+}
+
+static int sn_force_interrupt_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, sn_force_interrupt_show, NULL);
+}
+
+static int coherence_id_show(struct seq_file *s, void *p)
+{
+	seq_printf(s, "%d\n", partition_coherence_id());
+
+	return 0;
+}
+
+static int coherence_id_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, coherence_id_show, NULL);
+}
+
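+/*
+ * Create a read-only /proc entry wired to the seq_file helpers.  Each
+ * entry gets its own heap-allocated file_operations so callers can
+ * patch in extra methods afterwards (register_sn_procfs() below adds a
+ * write handler to the sn_force_interrupt entry this way).
+ */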
+static struct proc_dir_entry *sn_procfs_create_entry(
+	const char *name, struct proc_dir_entry *parent,
+	int (*openfunc)(struct inode *, struct file *),
+	int (*releasefunc)(struct inode *, struct file *))
+{
+	struct proc_dir_entry *e = create_proc_entry(name, 0444, parent);
+
+	if (!e)
+		return NULL;
+
+	e->proc_fops = kmalloc(sizeof(struct file_operations), GFP_KERNEL);
+	if (!e->proc_fops) {
+		/* don't leave an entry behind with no file operations */
+		remove_proc_entry(name, parent);
+		return NULL;
+	}
+	memset(e->proc_fops, 0, sizeof(struct file_operations));
+	e->proc_fops->open = openfunc;
+	e->proc_fops->read = seq_read;
+	e->proc_fops->llseek = seq_lseek;
+	e->proc_fops->release = releasefunc;
+
+	return e;
+}
+
+/* /proc/sgi_sn/sn_topology uses seq_file, see sn_hwperf.c */
+extern int sn_topology_open(struct inode *, struct file *);
+extern int sn_topology_release(struct inode *, struct file *);
+
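+/*
+ * Create /proc/sgi_sn and populate it: partition_id,
+ * system_serial_number, licenseID, sn_force_interrupt (writable),
+ * coherence_id and sn_topology.
+ */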
+void register_sn_procfs(void)
+{
+	static struct proc_dir_entry *sgi_proc_dir = NULL;
+	struct proc_dir_entry *e;
+
+	BUG_ON(sgi_proc_dir != NULL);
+	if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL)))
+		return;
+
+	sn_procfs_create_entry("partition_id", sgi_proc_dir,
+		partition_id_open, single_release);
+
+	sn_procfs_create_entry("system_serial_number", sgi_proc_dir,
+		system_serial_number_open, single_release);
+
+	sn_procfs_create_entry("licenseID", sgi_proc_dir,
+		licenseID_open, single_release);
+
+	e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir,
+		sn_force_interrupt_open, single_release);
+	if (e)
+		e->proc_fops->write = sn_force_interrupt_write_proc;
+
+	sn_procfs_create_entry("coherence_id", sgi_proc_dir,
+		coherence_id_open, single_release);
+
+	sn_procfs_create_entry("sn_topology", sgi_proc_dir,
+		sn_topology_open, sn_topology_release);
+}
+
+#endif /* CONFIG_PROC_FS */
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
new file mode 100644
index 0000000..deb9baf
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -0,0 +1,36 @@
+/*
+ * linux/arch/ia64/sn/kernel/sn2/timer.c
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc.
+ * Copyright (C) 2003 Hewlett-Packard Co
+ *	David Mosberger <davidm@hpl.hp.com>: updated for new timer-interpolation infrastructure
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+
+#include <asm/hw_irq.h>
+#include <asm/system.h>
+
+#include <asm/sn/leds.h>
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/clksupport.h>
+
+extern unsigned long sn_rtc_cycles_per_second;
+
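+/*
+ * Describe the SN RTC to the generic time-interpolation code: a
+ * memory-mapped 64-bit register of which 55 bits count (hence the
+ * mask); drift = -1 leaves the drift unspecified.
+ */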
+static struct time_interpolator sn2_interpolator = {
+	.drift = -1,
+	.shift = 10,
+	.mask = (1LL << 55) - 1,
+	.source = TIME_SOURCE_MMIO64
+};
+
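+/* Called during SN platform setup to register the RTC as time source */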
+void __init sn_timer_init(void)
+{
+	sn2_interpolator.frequency = sn_rtc_cycles_per_second;
+	sn2_interpolator.addr = RTC_COUNTER_ADDR;
+	register_time_interpolator(&sn2_interpolator);
+}
diff --git a/arch/ia64/sn/kernel/sn2/timer_interrupt.c b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
new file mode 100644
index 0000000..cde7375
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it 
+ * under the terms of version 2 of the GNU General Public License 
+ * as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope that it would be useful, but 
+ * WITHOUT ANY WARRANTY; without even the implied warranty of 
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * 
+ * Further, this software is distributed without any warranty that it is 
+ * free of the rightful claim of any third person regarding infringement 
+ * or the like.  Any license provided herein, whether implied or 
+ * otherwise, applies only to this software file.  Patent licenses, if 
+ * any, provided herein do not apply to combinations of this program with 
+ * other software, or any other product whatsoever.
+ * 
+ * You should have received a copy of the GNU General Public 
+ * License along with this program; if not, write the Free Software 
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * 
+ * Contact information:  Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, 
+ * Mountain View, CA  94043, or:
+ * 
+ * http://www.sgi.com 
+ * 
+ * For further information regarding this notice, see: 
+ * 
+ * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ */
+
+#include <linux/interrupt.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/leds.h>
+
+extern void sn_lb_int_war_check(void);
+extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+#define SN_LB_INT_WAR_INTERVAL 100
+
+void sn_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	/* blink the cpu heartbeat LED: toggled every HZ/2 ticks (~1Hz) */
+	if (!pda->hb_count--) {
+		pda->hb_count = HZ / 2;
+		set_led_bits(pda->hb_state ^=
+			     LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT);
+	}
+
+	if (enable_shub_wars_1_1()) {
+		/* Bugfix code for SHUB 1.1 */
+		if (pda->pio_shub_war_cam_addr)
+			*pda->pio_shub_war_cam_addr = 0x8000000000000010UL;
+	}
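+
+	/*
+	 * Run the load-balance interrupt workaround check once every
+	 * SN_LB_INT_WAR_INTERVAL (100) timer ticks.
+	 */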
+	if (pda->sn_lb_int_war_ticks == 0)
+		sn_lb_int_war_check();
+	pda->sn_lb_int_war_ticks++;
+	if (pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL)
+		pda->sn_lb_int_war_ticks = 0;
+}