Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/arch/ia64/ia32/Makefile b/arch/ia64/ia32/Makefile
new file mode 100644
index 0000000..2ed90da
--- /dev/null
+++ b/arch/ia64/ia32/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the ia32 kernel emulation subsystem.
+#
+
+obj-y := ia32_entry.o sys_ia32.o ia32_ioctl.o ia32_signal.o \
+	 ia32_support.o ia32_traps.o binfmt_elf32.o ia32_ldt.o
+
+CFLAGS_ia32_ioctl.o += -Ifs/
+
+# Don't let GCC use f16-f31 so that save_ia32_fpstate_live() and
+# restore_ia32_fpstate_live() can be sure the live registers contain user-level state.
+CFLAGS_ia32_signal.o += -mfixed-range=f16-f31
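+#
+# A sketch of what -mfixed-range buys us (assuming GCC's ia64 backend
+# semantics): the register allocator treats f16-f31 as fixed registers it
+# may never allocate, so code compiled from ia32_signal.c only touches the
+# scratch FP registers -- e.g. a temporary for "long double t = a * b;"
+# can land in f6-f15 but never in the f16-f31 range holding live user state.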
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
new file mode 100644
index 0000000..31de70b
--- /dev/null
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -0,0 +1,294 @@
+/*
+ * IA-32 ELF support.
+ *
+ * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
+ * Copyright (C) 2001 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 06/16/00	A. Mallick	initialize csd/ssd/tssd/cflg for ia32_load_state
+ * 04/13/01	D. Mosberger	dropped saving tssd in ar.k1---it's not needed
+ * 09/14/01	D. Mosberger	fixed memory management for gdt/tss page
+ */
+#include <linux/config.h>
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/security.h>
+
+#include <asm/param.h>
+#include <asm/signal.h>
+
+#include "ia32priv.h"
+#include "elfcore32.h"
+
+/* Override some function names */
+#undef start_thread
+#define start_thread			ia32_start_thread
+#define elf_format			elf32_format
+#define init_elf_binfmt			init_elf32_binfmt
+#define exit_elf_binfmt			exit_elf32_binfmt
+
+#undef CLOCKS_PER_SEC
+#define CLOCKS_PER_SEC	IA32_CLOCKS_PER_SEC
+
+extern void ia64_elf32_init (struct pt_regs *regs);
+
+static void elf32_set_personality (void);
+
+#define setup_arg_pages(bprm,tos,exec)		ia32_setup_arg_pages(bprm,exec)
+#define elf_map				elf32_map
+
+#undef SET_PERSONALITY
+#define SET_PERSONALITY(ex, ibcs2)	elf32_set_personality()
+
+#define elf_read_implies_exec(ex, have_pt_gnu_stack)	(!(have_pt_gnu_stack))
+
+/* Ugly but avoids duplication */
+#include "../../../fs/binfmt_elf.c"
+
+extern struct page *ia32_shared_page[];
+extern unsigned long *ia32_gdt;
+extern struct page *ia32_gate_page;
+
+struct page *
+ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int *type)
+{
+	struct page *pg = ia32_shared_page[smp_processor_id()];
+	get_page(pg);
+	if (type)
+		*type = VM_FAULT_MINOR;
+	return pg;
+}
+
+struct page *
+ia32_install_gate_page (struct vm_area_struct *vma, unsigned long address, int *type)
+{
+	struct page *pg = ia32_gate_page;
+	get_page(pg);
+	if (type)
+		*type = VM_FAULT_MINOR;
+	return pg;
+}
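+
+/*
+ * Both handlers above follow the 2.6-era ->nopage contract: return the
+ * backing page with an extra reference (get_page) and report
+ * VM_FAULT_MINOR, since the page is already resident and no I/O was
+ * needed; the generic fault path then installs the PTE for the faulting
+ * address.
+ */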
+
+
+static struct vm_operations_struct ia32_shared_page_vm_ops = {
+	.nopage = ia32_install_shared_page
+};
+
+static struct vm_operations_struct ia32_gate_page_vm_ops = {
+	.nopage = ia32_install_gate_page
+};
+
+void
+ia64_elf32_init (struct pt_regs *regs)
+{
+	struct vm_area_struct *vma;
+
+	/*
+	 * Map GDT below 4GB, where the processor can find it.  We need to map
+	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
+	 * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
+	 */
+	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	if (vma) {
+		memset(vma, 0, sizeof(*vma));
+		vma->vm_mm = current->mm;
+		vma->vm_start = IA32_GDT_OFFSET;
+		vma->vm_end = vma->vm_start + PAGE_SIZE;
+		vma->vm_page_prot = PAGE_SHARED;
+		vma->vm_flags = VM_READ|VM_MAYREAD|VM_RESERVED;
+		vma->vm_ops = &ia32_shared_page_vm_ops;
+		down_write(&current->mm->mmap_sem);
+		{
+			if (insert_vm_struct(current->mm, vma)) {
+				kmem_cache_free(vm_area_cachep, vma);
+				up_write(&current->mm->mmap_sem);
+				BUG();
+			}
+		}
+		up_write(&current->mm->mmap_sem);
+	}
+
+	/*
+	 * When the user stack is not executable, pushing sigreturn code onto the
+	 * stack raises a segmentation fault when returning to the kernel.  So the
+	 * sigreturn code is instead kept in a dedicated gate page, which pretcode
+	 * is pointed at when setup_frame_ia32() builds the signal frame.
+	 */
+	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	if (vma) {
+		memset(vma, 0, sizeof(*vma));
+		vma->vm_mm = current->mm;
+		vma->vm_start = IA32_GATE_OFFSET;
+		vma->vm_end = vma->vm_start + PAGE_SIZE;
+		vma->vm_page_prot = PAGE_COPY_EXEC;
+		vma->vm_flags = VM_READ | VM_MAYREAD | VM_EXEC
+				| VM_MAYEXEC | VM_RESERVED;
+		vma->vm_ops = &ia32_gate_page_vm_ops;
+		down_write(&current->mm->mmap_sem);
+		{
+			if (insert_vm_struct(current->mm, vma)) {
+				kmem_cache_free(vm_area_cachep, vma);
+				up_write(&current->mm->mmap_sem);
+				BUG();
+			}
+		}
+		up_write(&current->mm->mmap_sem);
+	}
+
+	/*
+	 * Install LDT as anonymous memory.  This gives us all-zero segment descriptors
+	 * until a task modifies them via modify_ldt().
+	 */
+	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	if (vma) {
+		memset(vma, 0, sizeof(*vma));
+		vma->vm_mm = current->mm;
+		vma->vm_start = IA32_LDT_OFFSET;
+		vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
+		vma->vm_page_prot = PAGE_SHARED;
+		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE;
+		down_write(&current->mm->mmap_sem);
+		{
+			if (insert_vm_struct(current->mm, vma)) {
+				kmem_cache_free(vm_area_cachep, vma);
+				up_write(&current->mm->mmap_sem);
+				BUG();
+			}
+		}
+		up_write(&current->mm->mmap_sem);
+	}
+
+	ia64_psr(regs)->ac = 0;		/* turn off alignment checking */
+	regs->loadrs = 0;
+	/*
+	 *  According to the ABI %edx points to an `atexit' handler.  Since we don't have
+	 *  one we'll set it to 0 and initialize all the other registers just to make
+	 *  things more deterministic, a la the i386 implementation.
+	 */
+	regs->r8 = 0;	/* %eax */
+	regs->r11 = 0;	/* %ebx */
+	regs->r9 = 0;	/* %ecx */
+	regs->r10 = 0;	/* %edx */
+	regs->r13 = 0;	/* %ebp */
+	regs->r14 = 0;	/* %esi */
+	regs->r15 = 0;	/* %edi */
+
+	current->thread.eflag = IA32_EFLAG;
+	current->thread.fsr = IA32_FSR_DEFAULT;
+	current->thread.fcr = IA32_FCR_DEFAULT;
+	current->thread.fir = 0;
+	current->thread.fdr = 0;
+
+	/*
+	 * Setup GDTD.  Note: GDTD is the descrambled version of the pseudo-descriptor
+	 * format defined by Figure 3-11 "Pseudo-Descriptor Format" in the IA-32
+	 * architecture manual. Also note that the only fields that are not ignored are
+	 * `base', `limit', `G', `P' (must be 1) and `S' (must be 0).
+	 */
+	regs->r31 = IA32_SEG_UNSCRAMBLE(IA32_SEG_DESCRIPTOR(IA32_GDT_OFFSET, IA32_PAGE_SIZE - 1,
+							    0, 0, 0, 1, 0, 0, 0));
+	/* Setup the segment selectors */
+	regs->r16 = (__USER_DS << 16) | __USER_DS; /* ES == DS, GS, FS are zero */
+	regs->r17 = (__USER_DS << 16) | __USER_CS; /* SS, CS; ia32_load_state() sets TSS and LDT */
+
+	ia32_load_segment_descriptors(current);
+	ia32_load_state(current);
+}
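+
+/*
+ * Layout of the selector words set up above, as implied by the shifts here
+ * and in setup_sigcontext_ia32():
+ *
+ *	r16 = DS | ES << 16 | FS << 32 | GS << 48
+ *	r17 = CS | SS << 16	(upper halves: TSS/LDT, via ia32_load_state())
+ */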
+
+int
+ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
+{
+	unsigned long stack_base;
+	struct vm_area_struct *mpnt;
+	struct mm_struct *mm = current->mm;
+	int i, ret;
+
+	stack_base = IA32_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
+	mm->arg_start = bprm->p + stack_base;
+
+	bprm->p += stack_base;
+	if (bprm->loader)
+		bprm->loader += stack_base;
+	bprm->exec += stack_base;
+
+	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	if (!mpnt)
+		return -ENOMEM;
+
+	if (security_vm_enough_memory((IA32_STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))
+				      >> PAGE_SHIFT)) {
+		kmem_cache_free(vm_area_cachep, mpnt);
+		return -ENOMEM;
+	}
+
+	memset(mpnt, 0, sizeof(*mpnt));
+
+	down_write(&current->mm->mmap_sem);
+	{
+		mpnt->vm_mm = current->mm;
+		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
+		mpnt->vm_end = IA32_STACK_TOP;
+		if (executable_stack == EXSTACK_ENABLE_X)
+			mpnt->vm_flags = VM_STACK_FLAGS |  VM_EXEC;
+		else if (executable_stack == EXSTACK_DISABLE_X)
+			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
+		else
+			mpnt->vm_flags = VM_STACK_FLAGS;
+		mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC)?
+					PAGE_COPY_EXEC: PAGE_COPY;
+		if ((ret = insert_vm_struct(current->mm, mpnt))) {
+			up_write(&current->mm->mmap_sem);
+			kmem_cache_free(vm_area_cachep, mpnt);
+			return ret;
+		}
+		current->mm->stack_vm = current->mm->total_vm = vma_pages(mpnt);
+	}
+
+	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+		struct page *page = bprm->page[i];
+		if (page) {
+			bprm->page[i] = NULL;
+			install_arg_page(mpnt, page, stack_base);
+		}
+		stack_base += PAGE_SIZE;
+	}
+	up_write(&current->mm->mmap_sem);
+
+	/* Can't do it in ia64_elf32_init(). Needs to be done before calls to
+	   elf32_map() */
+	current->thread.ppl = ia32_init_pp_list();
+
+	return 0;
+}
+
+static void
+elf32_set_personality (void)
+{
+	set_personality(PER_LINUX32);
+	current->thread.map_base  = IA32_PAGE_OFFSET/3;
+	current->thread.task_size = IA32_PAGE_OFFSET;	/* use what Linux/x86 uses... */
+	set_fs(USER_DS);				/* set addr limit for new TASK_SIZE */
+}
+
+static unsigned long
+elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
+{
+	unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;
+
+	return ia32_do_mmap(filep, (addr & IA32_PAGE_MASK), eppnt->p_filesz + pgoff, prot, type,
+			    eppnt->p_offset - pgoff);
+}
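+
+/*
+ * Worked example of the offset math above (hypothetical values, assuming
+ * the usual 4KB IA-32 page size): for p_vaddr 0x08048abc, pgoff is 0xabc,
+ * so the mapping starts at the page boundary 0x08048000, the length grows
+ * by 0xabc, and p_offset is backed up by the same 0xabc bytes so that file
+ * and memory stay in sync.
+ */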
+
+#define cpu_uses_ia32el()	(local_cpu_data->family > 0x1f)
+
+static int __init check_elf32_binfmt(void)
+{
+	if (cpu_uses_ia32el()) {
+		printk("Please use IA-32 EL for executing IA-32 binaries\n");
+		return unregister_binfmt(&elf_format);
+	}
+	return 0;
+}
+
+module_init(check_elf32_binfmt)
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
new file mode 100644
index 0000000..b73b8b6
--- /dev/null
+++ b/arch/ia64/ia32/elfcore32.h
@@ -0,0 +1,138 @@
+/*
+ * IA-32 ELF core dump support.
+ *
+ * Copyright (C) 2003 Arun Sharma <arun.sharma@intel.com>
+ *
+ * Derived from the x86_64 version
+ */
+#ifndef _ELFCORE32_H_
+#define _ELFCORE32_H_
+
+#include <asm/intrinsics.h>
+#include <asm/uaccess.h>
+
+#define USE_ELF_CORE_DUMP 1
+
+/* Override elfcore.h */
+#define _LINUX_ELFCORE_H 1
+typedef unsigned int elf_greg_t;
+
+#define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct ia32_user_i387_struct elf_fpregset_t;
+typedef struct ia32_user_fxsr_struct elf_fpxregset_t;
+
+struct elf_siginfo
+{
+	int	si_signo;			/* signal number */
+	int	si_code;			/* extra code */
+	int	si_errno;			/* errno */
+};
+
+#define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
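+
+/* e.g. assuming HZ == 1024 (a common ia64 setting of this era), 3072
+   jiffies become { .tv_sec = 3, .tv_usec = 0 } -- sub-second precision is
+   deliberately dropped in the core-dump timestamps */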
+
+struct elf_prstatus
+{
+	struct elf_siginfo pr_info;	/* Info associated with signal */
+	short	pr_cursig;		/* Current signal */
+	unsigned int pr_sigpend;	/* Set of pending signals */
+	unsigned int pr_sighold;	/* Set of held signals */
+	pid_t	pr_pid;
+	pid_t	pr_ppid;
+	pid_t	pr_pgrp;
+	pid_t	pr_sid;
+	struct compat_timeval pr_utime;	/* User time */
+	struct compat_timeval pr_stime;	/* System time */
+	struct compat_timeval pr_cutime;	/* Cumulative user time */
+	struct compat_timeval pr_cstime;	/* Cumulative system time */
+	elf_gregset_t pr_reg;	/* GP registers */
+	int pr_fpvalid;		/* True if math co-processor being used.  */
+};
+
+#define ELF_PRARGSZ	(80)	/* Number of chars for args */
+
+struct elf_prpsinfo
+{
+	char	pr_state;	/* numeric process state */
+	char	pr_sname;	/* char for pr_state */
+	char	pr_zomb;	/* zombie */
+	char	pr_nice;	/* nice val */
+	unsigned int pr_flag;	/* flags */
+	__u16	pr_uid;
+	__u16	pr_gid;
+	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
+	/* Lots missing */
+	char	pr_fname[16];	/* filename of executable */
+	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
+};
+
+#define ELF_CORE_COPY_REGS(pr_reg, regs)       		\
+	pr_reg[0] = regs->r11;				\
+	pr_reg[1] = regs->r9;				\
+	pr_reg[2] = regs->r10;				\
+	pr_reg[3] = regs->r14;				\
+	pr_reg[4] = regs->r15;				\
+	pr_reg[5] = regs->r13;				\
+	pr_reg[6] = regs->r8;				\
+	pr_reg[7] = regs->r16 & 0xffff;			\
+	pr_reg[8] = (regs->r16 >> 16) & 0xffff;		\
+	pr_reg[9] = (regs->r16 >> 32) & 0xffff;		\
+	pr_reg[10] = (regs->r16 >> 48) & 0xffff;	\
+	pr_reg[11] = regs->r1; 				\
+	pr_reg[12] = regs->cr_iip;			\
+	pr_reg[13] = regs->r17 & 0xffff;		\
+	pr_reg[14] = ia64_getreg(_IA64_REG_AR_EFLAG);	\
+	pr_reg[15] = regs->r12;				\
+	pr_reg[16] = (regs->r17 >> 16) & 0xffff;
+
+static inline void elf_core_copy_regs(elf_gregset_t *elfregs,
+				      struct pt_regs *regs)
+{
+	ELF_CORE_COPY_REGS((*elfregs), regs)
+}
+
+static inline int elf_core_copy_task_regs(struct task_struct *t,
+					  elf_gregset_t* elfregs)
+{
+	struct pt_regs *pp = ia64_task_regs(t);
+	ELF_CORE_COPY_REGS((*elfregs), pp);
+	return 1;
+}
+
+static inline int
+elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+	struct ia32_user_i387_struct *fpstate = (void*)fpu;
+	mm_segment_t old_fs;
+
+	if (!tsk_used_math(tsk))
+		return 0;
+	
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	save_ia32_fpstate(tsk, (struct ia32_user_i387_struct __user *) fpstate);
+	set_fs(old_fs);
+
+	return 1;
+}
+
+#define ELF_CORE_COPY_XFPREGS 1
+static inline int
+elf_core_copy_task_xfpregs(struct task_struct *tsk, elf_fpxregset_t *xfpu)
+{
+	struct ia32_user_fxsr_struct *fpxstate = (void*) xfpu;
+	mm_segment_t old_fs;
+
+	if (!tsk_used_math(tsk))
+		return 0;
+
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	save_ia32_fpxstate(tsk, (struct ia32_user_fxsr_struct __user *) fpxstate);
+	set_fs(old_fs);
+
+	return 1;
+}
+
+#endif /* _ELFCORE32_H_ */
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
new file mode 100644
index 0000000..829a6d8
--- /dev/null
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -0,0 +1,500 @@
+#include <asm/asmmacro.h>
+#include <asm/ia32.h>
+#include <asm/offsets.h>
+#include <asm/signal.h>
+#include <asm/thread_info.h>
+
+#include "../kernel/minstate.h"
+
+	/*
+	 * execve() is special because in case of success, we need to
+	 * setup a null register window frame (in case an IA-32 process
+	 * is exec'ing an IA-64 program).
+	 */
+ENTRY(ia32_execve)
+	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3)
+	alloc loc1=ar.pfs,3,2,4,0
+	mov loc0=rp
+	.body
+	zxt4 out0=in0			// filename
+	;;				// stop bit between alloc and call
+	zxt4 out1=in1			// argv
+	zxt4 out2=in2			// envp
+	add out3=16,sp			// regs
+	br.call.sptk.few rp=sys32_execve
+1:	cmp.ge p6,p0=r8,r0
+	mov ar.pfs=loc1			// restore ar.pfs
+	;;
+(p6)	mov ar.pfs=r0			// clear ar.pfs in case of success
+	sxt4 r8=r8			// return 64-bit result
+	mov rp=loc0
+	br.ret.sptk.few rp
+END(ia32_execve)
+
+ENTRY(ia32_clone)
+	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
+	alloc r16=ar.pfs,5,2,6,0
+	DO_SAVE_SWITCH_STACK
+	mov loc0=rp
+	mov loc1=r16				// save ar.pfs across do_fork
+	.body
+	zxt4 out1=in1				// newsp
+	mov out3=16				// stacksize (compensates for 16-byte scratch area)
+	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
+	mov out0=in0				// out0 = clone_flags
+	zxt4 out4=in2				// out4 = parent_tidptr
+	zxt4 out5=in4				// out5 = child_tidptr
+	br.call.sptk.many rp=do_fork
+.ret0:	.restore sp
+	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
+	mov ar.pfs=loc1
+	mov rp=loc0
+	br.ret.sptk.many rp
+END(ia32_clone)
+
+ENTRY(sys32_rt_sigsuspend)
+	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+	alloc loc1=ar.pfs,8,2,3,0		// preserve all eight input regs
+	mov loc0=rp
+	mov out0=in0				// mask
+	mov out1=in1				// sigsetsize
+	mov out2=sp				// out2 = &sigscratch
+	.fframe 16
+	adds sp=-16,sp				// allocate dummy "sigscratch"
+	;;
+	.body
+	br.call.sptk.many rp=ia32_rt_sigsuspend
+1:	.restore sp
+	adds sp=16,sp
+	mov rp=loc0
+	mov ar.pfs=loc1
+	br.ret.sptk.many rp
+END(sys32_rt_sigsuspend)
+
+ENTRY(sys32_sigsuspend)
+	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+	alloc loc1=ar.pfs,8,2,3,0		// preserve all eight input regs
+	mov loc0=rp
+	mov out0=in2				// mask (first two args are ignored)
+	;;
+	mov out1=sp				// out1 = &sigscratch
+	.fframe 16
+	adds sp=-16,sp				// allocate dummy "sigscratch"
+	.body
+	br.call.sptk.many rp=ia32_sigsuspend
+1:	.restore sp
+	adds sp=16,sp
+	mov rp=loc0
+	mov ar.pfs=loc1
+	br.ret.sptk.many rp
+END(sys32_sigsuspend)
+
+GLOBAL_ENTRY(ia32_ret_from_clone)
+	PT_REGS_UNWIND_INFO(0)
+{	/*
+	 * Some versions of gas generate bad unwind info if the first instruction of a
+	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
+	 */
+	nop.m 0
+	nop.i 0
+	/*
+	 * We need to call schedule_tail() to complete the scheduling process.
+	 * Called by ia64_switch_to after do_fork()->copy_thread().  r8 contains the
+	 * address of the previously executing task.
+	 */
+	br.call.sptk.many rp=ia64_invoke_schedule_tail
+}
+.ret1:
+	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
+	;;
+	ld4 r2=[r2]
+	;;
+	mov r8=0
+	and r2=_TIF_SYSCALL_TRACEAUDIT,r2
+	;;
+	cmp.ne p6,p0=r2,r0
+(p6)	br.cond.spnt .ia32_strace_check_retval
+	;;					// prevent RAW on r8
+END(ia32_ret_from_clone)
+	// fall through
+GLOBAL_ENTRY(ia32_ret_from_syscall)
+	PT_REGS_UNWIND_INFO(0)
+
+	cmp.ge p6,p7=r8,r0                      // syscall executed successfully?
+	adds r2=IA64_PT_REGS_R8_OFFSET+16,sp    // r2 = &pt_regs.r8
+	;;
+	alloc r3=ar.pfs,0,0,0,0			// drop the syscall argument frame
+	st8 [r2]=r8                             // store return value in slot for r8
+	br.cond.sptk.many ia64_leave_kernel
+END(ia32_ret_from_syscall)
+
+	//
+	// Invoke a system call, but do some tracing before and after the call.
+	// We MUST preserve the current register frame throughout this routine
+	// because some system calls (such as ia64_execve) directly
+	// manipulate ar.pfs.
+	//
+	// Input:
+	//	r8 = syscall number
+	//	b6 = syscall entry point
+	//
+GLOBAL_ENTRY(ia32_trace_syscall)
+	PT_REGS_UNWIND_INFO(0)
+	mov r3=-38
+	adds r2=IA64_PT_REGS_R8_OFFSET+16,sp
+	;;
+	st8 [r2]=r3				// initialize return code to -ENOSYS
+	br.call.sptk.few rp=syscall_trace_enter	// give parent a chance to catch syscall args
+.ret2:	// Need to reload arguments (they may be changed by the tracing process)
+	adds r2=IA64_PT_REGS_R1_OFFSET+16,sp	// r2 = &pt_regs.r1
+	adds r3=IA64_PT_REGS_R13_OFFSET+16,sp	// r3 = &pt_regs.r13
+	mov r15=IA32_NR_syscalls
+	;;
+	ld4 r8=[r2],IA64_PT_REGS_R9_OFFSET-IA64_PT_REGS_R1_OFFSET
+	movl r16=ia32_syscall_table
+	;;
+	ld4 r33=[r2],8				// r9 == ecx
+	ld4 r37=[r3],16				// r13 == ebp
+	cmp.ltu.unc p6,p7=r8,r15
+	;;
+	ld4 r34=[r2],8				// r10 == edx
+	ld4 r36=[r3],8				// r15 == edi
+(p6)	shladd r16=r8,3,r16	// valid: index the table; invalid: r16 stays at entry 0 (ni_syscall)
+	;;
+	ld8 r16=[r16]
+	;;
+	ld4 r32=[r2],8				// r11 == ebx
+	mov b6=r16
+	ld4 r35=[r3],8				// r14 == esi
+	br.call.sptk.few rp=b6			// do the syscall
+.ia32_strace_check_retval:
+	cmp.lt p6,p0=r8,r0			// syscall failed?
+	adds r2=IA64_PT_REGS_R8_OFFSET+16,sp	// r2 = &pt_regs.r8
+	;;
+	st8.spill [r2]=r8			// store return value in slot for r8
+	br.call.sptk.few rp=syscall_trace_leave	// give parent a chance to catch return value
+.ret4:	alloc r2=ar.pfs,0,0,0,0			// drop the syscall argument frame
+	br.cond.sptk.many ia64_leave_kernel
+END(ia32_trace_syscall)
+
+GLOBAL_ENTRY(sys32_vfork)
+	alloc r16=ar.pfs,2,2,4,0;;
+	mov out0=IA64_CLONE_VFORK|IA64_CLONE_VM|SIGCHLD	// out0 = clone_flags
+	br.cond.sptk.few .fork1			// do the work
+END(sys32_vfork)
+
+GLOBAL_ENTRY(sys32_fork)
+	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
+	alloc r16=ar.pfs,2,2,4,0
+	mov out0=SIGCHLD			// out0 = clone_flags
+	;;
+.fork1:
+	mov loc0=rp
+	mov loc1=r16				// save ar.pfs across do_fork
+	DO_SAVE_SWITCH_STACK
+
+	.body
+
+	mov out1=0
+	mov out3=0
+	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
+	br.call.sptk.few rp=do_fork
+.ret5:	.restore sp
+	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
+	mov ar.pfs=loc1
+	mov rp=loc0
+	br.ret.sptk.many rp
+END(sys32_fork)
+
+	.rodata
+	.align 8
+	.globl ia32_syscall_table
+ia32_syscall_table:
+	data8 sys_ni_syscall	  /* 0	-  old "setup()" system call */
+	data8 sys_exit
+	data8 sys32_fork
+	data8 sys_read
+	data8 sys_write
+	data8 sys32_open	  /* 5 */
+	data8 sys_close
+	data8 sys32_waitpid
+	data8 sys_creat
+	data8 sys_link
+	data8 sys_unlink	  /* 10 */
+	data8 ia32_execve
+	data8 sys_chdir
+	data8 compat_sys_time
+	data8 sys_mknod
+	data8 sys_chmod		  /* 15 */
+	data8 sys_lchown	/* 16-bit version */
+	data8 sys_ni_syscall	  /* old break syscall holder */
+	data8 sys_ni_syscall
+	data8 sys32_lseek
+	data8 sys_getpid	  /* 20 */
+	data8 compat_sys_mount
+	data8 sys_oldumount
+	data8 sys_setuid	/* 16-bit version */
+	data8 sys_getuid	/* 16-bit version */
+	data8 compat_sys_stime    /* 25 */
+	data8 sys32_ptrace
+	data8 sys32_alarm
+	data8 sys_ni_syscall
+	data8 sys32_pause
+	data8 compat_sys_utime	  /* 30 */
+	data8 sys_ni_syscall	  /* old stty syscall holder */
+	data8 sys_ni_syscall	  /* old gtty syscall holder */
+	data8 sys_access
+	data8 sys_nice
+	data8 sys_ni_syscall	  /* 35 */	  /* old ftime syscall holder */
+	data8 sys_sync
+	data8 sys_kill
+	data8 sys_rename
+	data8 sys_mkdir
+	data8 sys_rmdir		  /* 40 */
+	data8 sys_dup
+	data8 sys32_pipe
+	data8 compat_sys_times
+	data8 sys_ni_syscall	  /* old prof syscall holder */
+	data8 sys32_brk		  /* 45 */
+	data8 sys_setgid	/* 16-bit version */
+	data8 sys_getgid	/* 16-bit version */
+	data8 sys32_signal
+	data8 sys_geteuid	/* 16-bit version */
+	data8 sys_getegid	/* 16-bit version */	  /* 50 */
+	data8 sys_acct
+	data8 sys_umount	  /* recycled never used phys() */
+	data8 sys_ni_syscall	  /* old lock syscall holder */
+	data8 compat_sys_ioctl
+	data8 compat_sys_fcntl	  /* 55 */
+	data8 sys_ni_syscall	  /* old mpx syscall holder */
+	data8 sys_setpgid
+	data8 sys_ni_syscall	  /* old ulimit syscall holder */
+	data8 sys_ni_syscall
+	data8 sys_umask		  /* 60 */
+	data8 sys_chroot
+	data8 sys_ustat
+	data8 sys_dup2
+	data8 sys_getppid
+	data8 sys_getpgrp	  /* 65 */
+	data8 sys_setsid
+	data8 sys32_sigaction
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall
+	data8 sys_setreuid	/* 16-bit version */	  /* 70 */
+	data8 sys_setregid	/* 16-bit version */
+	data8 sys32_sigsuspend
+	data8 compat_sys_sigpending
+	data8 sys_sethostname
+	data8 compat_sys_setrlimit	  /* 75 */
+	data8 compat_sys_old_getrlimit
+	data8 compat_sys_getrusage
+	data8 sys32_gettimeofday
+	data8 sys32_settimeofday
+	data8 sys32_getgroups16	  /* 80 */
+	data8 sys32_setgroups16
+	data8 sys32_old_select
+	data8 sys_symlink
+	data8 sys_ni_syscall
+	data8 sys_readlink	  /* 85 */
+	data8 sys_uselib
+	data8 sys_swapon
+	data8 sys_reboot
+	data8 sys32_readdir
+	data8 sys32_mmap	  /* 90 */
+	data8 sys32_munmap
+	data8 sys_truncate
+	data8 sys_ftruncate
+	data8 sys_fchmod
+	data8 sys_fchown	/* 16-bit version */	  /* 95 */
+	data8 sys_getpriority
+	data8 sys_setpriority
+	data8 sys_ni_syscall	  /* old profil syscall holder */
+	data8 compat_sys_statfs
+	data8 compat_sys_fstatfs	  /* 100 */
+	data8 sys_ni_syscall	/* ioperm */
+	data8 compat_sys_socketcall
+	data8 sys_syslog
+	data8 compat_sys_setitimer
+	data8 compat_sys_getitimer	  /* 105 */
+	data8 compat_sys_newstat
+	data8 compat_sys_newlstat
+	data8 compat_sys_newfstat
+	data8 sys_ni_syscall
+	data8 sys_ni_syscall	/* iopl */	/* 110 */
+	data8 sys_vhangup
+	data8 sys_ni_syscall		/* used to be sys_idle */
+	data8 sys_ni_syscall
+	data8 compat_sys_wait4
+	data8 sys_swapoff	  /* 115 */
+	data8 sys32_sysinfo
+	data8 sys32_ipc
+	data8 sys_fsync
+	data8 sys32_sigreturn
+	data8 ia32_clone	  /* 120 */
+	data8 sys_setdomainname
+	data8 sys32_newuname
+	data8 sys32_modify_ldt
+	data8 sys_ni_syscall	/* adjtimex */
+	data8 sys32_mprotect	  /* 125 */
+	data8 compat_sys_sigprocmask
+	data8 sys_ni_syscall	/* create_module */
+	data8 sys_ni_syscall	/* init_module */
+	data8 sys_ni_syscall	/* delete_module */
+	data8 sys_ni_syscall	/* get_kernel_syms */  /* 130 */
+	data8 sys_quotactl
+	data8 sys_getpgid
+	data8 sys_fchdir
+	data8 sys_ni_syscall	/* sys_bdflush */
+	data8 sys_sysfs		/* 135 */
+	data8 sys32_personality
+	data8 sys_ni_syscall	  /* for afs_syscall */
+	data8 sys_setfsuid	/* 16-bit version */
+	data8 sys_setfsgid	/* 16-bit version */
+	data8 sys_llseek	  /* 140 */
+	data8 compat_sys_getdents
+	data8 compat_sys_select
+	data8 sys_flock
+	data8 sys32_msync
+	data8 compat_sys_readv	  /* 145 */
+	data8 compat_sys_writev
+	data8 sys_getsid
+	data8 sys_fdatasync
+	data8 sys32_sysctl
+	data8 sys_mlock		  /* 150 */
+	data8 sys_munlock
+	data8 sys_mlockall
+	data8 sys_munlockall
+	data8 sys_sched_setparam
+	data8 sys_sched_getparam  /* 155 */
+	data8 sys_sched_setscheduler
+	data8 sys_sched_getscheduler
+	data8 sys_sched_yield
+	data8 sys_sched_get_priority_max
+	data8 sys_sched_get_priority_min	 /* 160 */
+	data8 sys32_sched_rr_get_interval
+	data8 compat_sys_nanosleep
+	data8 sys32_mremap
+	data8 sys_setresuid	/* 16-bit version */
+	data8 sys32_getresuid16	/* 16-bit version */	  /* 165 */
+	data8 sys_ni_syscall	/* vm86 */
+	data8 sys_ni_syscall	/* sys_query_module */
+	data8 sys_poll
+	data8 sys_ni_syscall	/* nfsservctl */
+	data8 sys_setresgid	  /* 170 */
+	data8 sys32_getresgid16
+	data8 sys_prctl
+	data8 sys32_rt_sigreturn
+	data8 sys32_rt_sigaction
+	data8 sys32_rt_sigprocmask /* 175 */
+	data8 sys_rt_sigpending
+	data8 compat_sys_rt_sigtimedwait
+	data8 sys32_rt_sigqueueinfo
+	data8 sys32_rt_sigsuspend
+	data8 sys32_pread	  /* 180 */
+	data8 sys32_pwrite
+	data8 sys_chown	/* 16-bit version */
+	data8 sys_getcwd
+	data8 sys_capget
+	data8 sys_capset	  /* 185 */
+	data8 sys32_sigaltstack
+	data8 sys32_sendfile
+	data8 sys_ni_syscall		  /* streams1 */
+	data8 sys_ni_syscall		  /* streams2 */
+	data8 sys32_vfork	  /* 190 */
+	data8 compat_sys_getrlimit
+	data8 sys32_mmap2
+	data8 sys32_truncate64
+	data8 sys32_ftruncate64
+	data8 sys32_stat64	  /* 195 */
+	data8 sys32_lstat64
+	data8 sys32_fstat64
+	data8 sys_lchown
+	data8 sys_getuid
+	data8 sys_getgid	  /* 200 */
+	data8 sys_geteuid
+	data8 sys_getegid
+	data8 sys_setreuid
+	data8 sys_setregid
+	data8 sys_getgroups	  /* 205 */
+	data8 sys_setgroups
+	data8 sys_fchown
+	data8 sys_setresuid
+	data8 sys_getresuid
+	data8 sys_setresgid	  /* 210 */
+	data8 sys_getresgid
+	data8 sys_chown
+	data8 sys_setuid
+	data8 sys_setgid
+	data8 sys_setfsuid	  /* 215 */
+	data8 sys_setfsgid
+	data8 sys_pivot_root
+	data8 sys_mincore
+	data8 sys_madvise
+	data8 compat_sys_getdents64	  /* 220 */
+	data8 compat_sys_fcntl64
+	data8 sys_ni_syscall		/* reserved for TUX */
+	data8 sys_ni_syscall		/* reserved for Security */
+	data8 sys_gettid
+	data8 sys_readahead	  /* 225 */
+ 	data8 sys_setxattr
+ 	data8 sys_lsetxattr
+ 	data8 sys_fsetxattr
+ 	data8 sys_getxattr
+ 	data8 sys_lgetxattr	/* 230 */
+ 	data8 sys_fgetxattr
+ 	data8 sys_listxattr
+ 	data8 sys_llistxattr
+ 	data8 sys_flistxattr
+ 	data8 sys_removexattr	/* 235 */
+ 	data8 sys_lremovexattr
+ 	data8 sys_fremovexattr
+	data8 sys_tkill
+ 	data8 sys_sendfile64
+	data8 compat_sys_futex	/* 240 */
+	data8 compat_sys_sched_setaffinity
+	data8 compat_sys_sched_getaffinity
+	data8 sys32_set_thread_area
+	data8 sys32_get_thread_area
+ 	data8 compat_sys_io_setup	/* 245 */
+ 	data8 sys_io_destroy
+ 	data8 compat_sys_io_getevents
+ 	data8 compat_sys_io_submit
+ 	data8 sys_io_cancel
+ 	data8 sys_fadvise64	/* 250 */
+	data8 sys_ni_syscall
+	data8 sys_exit_group
+ 	data8 sys_lookup_dcookie
+	data8 sys_epoll_create
+	data8 sys32_epoll_ctl	/* 255 */
+	data8 sys32_epoll_wait
+	data8 sys_remap_file_pages
+	data8 sys_set_tid_address
+ 	data8 sys32_timer_create
+ 	data8 compat_sys_timer_settime	/* 260 */
+ 	data8 compat_sys_timer_gettime
+ 	data8 sys_timer_getoverrun
+ 	data8 sys_timer_delete
+ 	data8 compat_sys_clock_settime
+ 	data8 compat_sys_clock_gettime /* 265 */
+ 	data8 compat_sys_clock_getres
+ 	data8 compat_sys_clock_nanosleep
+	data8 compat_sys_statfs64
+	data8 compat_sys_fstatfs64
+ 	data8 sys_tgkill	/* 270 */
+ 	data8 compat_sys_utimes
+ 	data8 sys32_fadvise64_64
+ 	data8 sys_ni_syscall
+  	data8 sys_ni_syscall
+ 	data8 sys_ni_syscall	/* 275 */
+  	data8 sys_ni_syscall
+  	data8 compat_sys_mq_open
+  	data8 sys_mq_unlink
+  	data8 compat_sys_mq_timedsend
+  	data8 compat_sys_mq_timedreceive	/* 280 */
+  	data8 compat_sys_mq_notify
+  	data8 compat_sys_mq_getsetattr
+	data8 sys_ni_syscall		/* reserved for kexec */
+	data8 compat_sys_waitid
+
+	// guard against failures to increase IA32_NR_syscalls
+	.org ia32_syscall_table + 8*IA32_NR_syscalls
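+	// (How the guard works: .org pins the location counter at the table
+	// base plus 8*IA32_NR_syscalls.  Adding a table entry without also
+	// bumping IA32_NR_syscalls would require moving the location counter
+	// backwards, which the assembler refuses, so the build fails loudly.)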
diff --git a/arch/ia64/ia32/ia32_ioctl.c b/arch/ia64/ia32/ia32_ioctl.c
new file mode 100644
index 0000000..9845dab
--- /dev/null
+++ b/arch/ia64/ia32/ia32_ioctl.c
@@ -0,0 +1,48 @@
+/*
+ * IA32 Architecture-specific ioctl shim code
+ *
+ * Copyright (C) 2000 VA Linux Co
+ * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
+ * Copyright (C) 2001-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/signal.h>	/* argh, msdos_fs.h isn't self-contained... */
+#include <linux/syscalls.h>
+#include "ia32priv.h"
+
+#define	INCLUDES
+#include "compat_ioctl.c"
+#include <asm/ioctl32.h>
+
+#define IOCTL_NR(a)	((a) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
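+
+/*
+ * Example: IOCTL_NR() masks off the size field of an ioctl number, so two
+ * encodings that differ only in argument size -- say _IOW('f', 7, int)
+ * versus _IOW('f', 7, long) -- compare equal, which is what size-independent
+ * command matching in a 32/64-bit shim wants.
+ */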
+
+#define DO_IOCTL(fd, cmd, arg) ({			\
+	int _ret;					\
+	mm_segment_t _old_fs = get_fs();		\
+							\
+	set_fs(KERNEL_DS);				\
+	_ret = sys_ioctl(fd, cmd, (unsigned long)arg);	\
+	set_fs(_old_fs);				\
+	_ret;						\
+})
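+
+/*
+ * Usage sketch (hypothetical call): once a handler has translated a 32-bit
+ * argument into a kernel-resident copy, it can forward the ioctl with the
+ * address limit widened so sys_ioctl()'s user-access checks accept the
+ * kernel pointer:
+ *
+ *	struct winsize ws;			// kernel copy, not user memory
+ *	int err = DO_IOCTL(fd, TIOCGWINSZ, &ws);	// runs under KERNEL_DS
+ *
+ * set_fs(_old_fs) inside the macro restores the limit afterwards.
+ */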
+
+#define CODE
+#include "compat_ioctl.c"
+
+typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);
+
+#define COMPATIBLE_IOCTL(cmd)		HANDLE_IOCTL((cmd),sys_ioctl)
+#define HANDLE_IOCTL(cmd,handler)	{ (cmd), (ioctl32_handler_t)(handler), NULL },
+#define IOCTL_TABLE_START \
+	struct ioctl_trans ioctl_start[] = {
+#define IOCTL_TABLE_END \
+	};
+
+IOCTL_TABLE_START
+#define DECLARES
+#include "compat_ioctl.c"
+#include <linux/compat_ioctl.h>
+IOCTL_TABLE_END
+
+int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/ia64/ia32/ia32_ldt.c b/arch/ia64/ia32/ia32_ldt.c
new file mode 100644
index 0000000..a152738
--- /dev/null
+++ b/arch/ia64/ia32/ia32_ldt.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2001, 2004 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * Adapted from arch/i386/kernel/ldt.c
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/vmalloc.h>
+
+#include <asm/uaccess.h>
+
+#include "ia32priv.h"
+
+/*
+ * read_ldt() is not really atomic - this is not a problem since synchronization of reads
+ * and writes done to the LDT has to be assured by user-space anyway. Writes are atomic,
+ * to protect the security checks done on new descriptors.
+ */
+static int
+read_ldt (void __user *ptr, unsigned long bytecount)
+{
+	unsigned long bytes_left, n;
+	char __user *src, *dst;
+	char buf[256];	/* temporary buffer (don't overflow kernel stack!) */
+
+	if (bytecount > IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE)
+		bytecount = IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE;
+
+	bytes_left = bytecount;
+
+	src = (void __user *) IA32_LDT_OFFSET;
+	dst = ptr;
+
+	while (bytes_left) {
+		n = sizeof(buf);
+		if (n > bytes_left)
+			n = bytes_left;
+
+		/*
+		 * We know we're reading valid memory, but we still must guard against
+		 * running out of memory.
+		 */
+		if (__copy_from_user(buf, src, n))
+			return -EFAULT;
+
+		if (copy_to_user(dst, buf, n))
+			return -EFAULT;
+
+		src += n;
+		dst += n;
+		bytes_left -= n;
+	}
+	return bytecount;
+}
+
+static int
+read_default_ldt (void __user * ptr, unsigned long bytecount)
+{
+	unsigned long size;
+	int err;
+
+	/* XXX fix me: should return equivalent of default_ldt[0] */
+	err = 0;
+	size = 8;
+	if (size > bytecount)
+		size = bytecount;
+
+	err = size;
+	if (clear_user(ptr, size))
+		err = -EFAULT;
+
+	return err;
+}
+
+static int
+write_ldt (void __user * ptr, unsigned long bytecount, int oldmode)
+{
+	struct ia32_user_desc ldt_info;
+	__u64 entry;
+	int ret;
+
+	if (bytecount != sizeof(ldt_info))
+		return -EINVAL;
+	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
+		return -EFAULT;
+
+	if (ldt_info.entry_number >= IA32_LDT_ENTRIES)
+		return -EINVAL;
+	if (ldt_info.contents == 3) {
+		if (oldmode)
+			return -EINVAL;
+		if (ldt_info.seg_not_present == 0)
+			return -EINVAL;
+	}
+
+	if (ldt_info.base_addr == 0 && ldt_info.limit == 0
+	    && (oldmode || (ldt_info.contents == 0 && ldt_info.read_exec_only == 1
+			    && ldt_info.seg_32bit == 0 && ldt_info.limit_in_pages == 0
+			    && ldt_info.seg_not_present == 1 && ldt_info.useable == 0)))
+		/* allow LDTs to be cleared by the user */
+		entry = 0;
+	else
+		/* we must set the "Accessed" bit as IVE doesn't emulate it */
+		entry = IA32_SEG_DESCRIPTOR(ldt_info.base_addr, ldt_info.limit,
+					    (((ldt_info.read_exec_only ^ 1) << 1)
+					     | (ldt_info.contents << 2)) | 1,
+					    1, 3, ldt_info.seg_not_present ^ 1,
+					    (oldmode ? 0 : ldt_info.useable),
+					    ldt_info.seg_32bit,
+					    ldt_info.limit_in_pages);
+	/*
+	 * Install the new entry.  We know we're accessing valid (mapped) user-level
+	 * memory, but we still need to guard against out-of-memory, hence we must use
+	 * put_user().
+	 */
+	ret = __put_user(entry, (__u64 __user *) IA32_LDT_OFFSET + ldt_info.entry_number);
+	ia32_load_segment_descriptors(current);
+	return ret;
+}
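+
+/*
+ * Worked example (hypothetical values): a flat 32-bit data segment -- base 0,
+ * limit 0xfffff, limit_in_pages=1, seg_32bit=1, contents=0, read_exec_only=0,
+ * seg_not_present=0, useable=1 -- yields type ((0^1)<<1 | 0<<2) | 1 = 3
+ * (data, writable, accessed), DPL 3, P=1 in the descriptor built above.
+ */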
+
+asmlinkage int
+sys32_modify_ldt (int func, unsigned int ptr, unsigned int bytecount)
+{
+	int ret = -ENOSYS;
+
+	switch (func) {
+	      case 0:
+		ret = read_ldt(compat_ptr(ptr), bytecount);
+		break;
+	      case 1:
+		ret = write_ldt(compat_ptr(ptr), bytecount, 1);
+		break;
+	      case 2:
+		ret = read_default_ldt(compat_ptr(ptr), bytecount);
+		break;
+	      case 0x11:
+		ret = write_ldt(compat_ptr(ptr), bytecount, 0);
+		break;
+	}
+	return ret;
+}
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
new file mode 100644
index 0000000..19b02ad
--- /dev/null
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -0,0 +1,1036 @@
+/*
+ * IA32 Architecture-specific signal handling support.
+ *
+ * Copyright (C) 1999, 2001-2002, 2005 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
+ * Copyright (C) 2000 VA Linux Co
+ * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
+ *
+ * Derived from i386 and Alpha versions.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/personality.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/syscalls.h>
+#include <linux/unistd.h>
+#include <linux/wait.h>
+#include <linux/compat.h>
+
+#include <asm/intrinsics.h>
+#include <asm/uaccess.h>
+#include <asm/rse.h>
+#include <asm/sigcontext.h>
+#include <asm/segment.h>
+
+#include "ia32priv.h"
+
+#include "../kernel/sigframe.h"
+
+#define A(__x)		((unsigned long)(__x))
+
+#define DEBUG_SIG	0
+#define _BLOCKABLE	(~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+#define __IA32_NR_sigreturn            119
+#define __IA32_NR_rt_sigreturn         173
+
+struct sigframe_ia32
+{
+       int pretcode;
+       int sig;
+       struct sigcontext_ia32 sc;
+       struct _fpstate_ia32 fpstate;
+       unsigned int extramask[_COMPAT_NSIG_WORDS-1];
+       char retcode[8];
+};
+
+struct rt_sigframe_ia32
+{
+       int pretcode;
+       int sig;
+       int pinfo;
+       int puc;
+       compat_siginfo_t info;
+       struct ucontext_ia32 uc;
+       struct _fpstate_ia32 fpstate;
+       char retcode[8];
+};
+
+int
+copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
+{
+	unsigned long tmp;
+	int err;
+
+	if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
+		return -EFAULT;
+
+	err = __get_user(to->si_signo, &from->si_signo);
+	err |= __get_user(to->si_errno, &from->si_errno);
+	err |= __get_user(to->si_code, &from->si_code);
+
+	if (to->si_code < 0)
+		err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
+	else {
+		switch (to->si_code >> 16) {
+		      case __SI_CHLD >> 16:
+			err |= __get_user(to->si_utime, &from->si_utime);
+			err |= __get_user(to->si_stime, &from->si_stime);
+			err |= __get_user(to->si_status, &from->si_status);
+		      default:
+			err |= __get_user(to->si_pid, &from->si_pid);
+			err |= __get_user(to->si_uid, &from->si_uid);
+			break;
+		      case __SI_FAULT >> 16:
+			err |= __get_user(tmp, &from->si_addr);
+			to->si_addr = (void __user *) tmp;
+			break;
+		      case __SI_POLL >> 16:
+			err |= __get_user(to->si_band, &from->si_band);
+			err |= __get_user(to->si_fd, &from->si_fd);
+			break;
+		      case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
+		      case __SI_MESGQ >> 16:
+			err |= __get_user(to->si_pid, &from->si_pid);
+			err |= __get_user(to->si_uid, &from->si_uid);
+			err |= __get_user(to->si_int, &from->si_int);
+			break;
+		}
+	}
+	return err;
+}
+
+int
+copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
+{
+	unsigned int addr;
+	int err;
+
+	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
+		return -EFAULT;
+
+	/* If you change siginfo_t structure, please be sure
+	   this code is fixed accordingly.
+	   It should never copy any pad contained in the structure
+	   to avoid security leaks, but must copy the generic
+	   3 ints plus the relevant union member.
+	   This routine must convert siginfo from 64bit to 32bit as well
+	   at the same time.  */
+	err = __put_user(from->si_signo, &to->si_signo);
+	err |= __put_user(from->si_errno, &to->si_errno);
+	err |= __put_user((short)from->si_code, &to->si_code);
+	if (from->si_code < 0)
+		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
+	else {
+		switch (from->si_code >> 16) {
+		case __SI_CHLD >> 16:
+			err |= __put_user(from->si_utime, &to->si_utime);
+			err |= __put_user(from->si_stime, &to->si_stime);
+			err |= __put_user(from->si_status, &to->si_status);
+		default:
+			err |= __put_user(from->si_pid, &to->si_pid);
+			err |= __put_user(from->si_uid, &to->si_uid);
+			break;
+		case __SI_FAULT >> 16:
+			/* avoid type-checking warnings by copying _pad[0] in lieu of si_addr... */
+			err |= __put_user(from->_sifields._pad[0], &to->si_addr);
+			break;
+		case __SI_POLL >> 16:
+			err |= __put_user(from->si_band, &to->si_band);
+			err |= __put_user(from->si_fd, &to->si_fd);
+			break;
+		case __SI_TIMER >> 16:
+			err |= __put_user(from->si_tid, &to->si_tid);
+			err |= __put_user(from->si_overrun, &to->si_overrun);
+			addr = (unsigned long) from->si_ptr;
+			err |= __put_user(addr, &to->si_ptr);
+			break;
+		case __SI_RT >> 16:	/* Not generated by the kernel as of now.  */
+		case __SI_MESGQ >> 16:
+			err |= __put_user(from->si_uid, &to->si_uid);
+			err |= __put_user(from->si_pid, &to->si_pid);
+			addr = (unsigned long) from->si_ptr;
+			err |= __put_user(addr, &to->si_ptr);
+			break;
+		}
+	}
+	return err;
+}
+
+
+/*
+ *  SAVE and RESTORE of ia32 fpstate info, from ia64 current state
+ *  Used in exception handler to pass the fpstate to the user, and restore
+ *  the fpstate while returning from the exception handler.
+ *
+ *    fpstate info and their mapping to IA64 regs:
+ *    fpstate    REG(BITS)      Attribute    Comments
+ *    cw         ar.fcr(0:12)                with bits 7 and 6 not used
+ *    sw         ar.fsr(0:15)
+ *    tag        ar.fsr(16:31)               with odd numbered bits not used
+ *                                           (read returns 0, writes ignored)
+ *    ipoff      ar.fir(0:31)
+ *    cssel      ar.fir(32:47)
+ *    dataoff    ar.fdr(0:31)
+ *    datasel    ar.fdr(32:47)
+ *
+ *    _st[(0+TOS)%8]   f8
+ *    _st[(1+TOS)%8]   f9
+ *    _st[(2+TOS)%8]   f10
+ *    _st[(3+TOS)%8]   f11                   (f8..f11 from ptregs)
+ *      : :            :                     (f12..f15 from live reg)
+ *      : :            :
+ *    _st[(7+TOS)%8]   f15                   TOS=sw.top(bits11:13)
+ *
+ *    status     Same as sw     RO
+ *    magic      0                           as X86_FXSR_MAGIC in ia32
+ *    mxcsr      Bits(7:15)=ar.fcr(39:47)
+ *               Bits(0:5) =ar.fsr(32:37)    with bit 6 reserved
+ *    _xmm[0..7] f16..f31                    (live registers)
+ *                                           with _xmm[0]
+ *                                             Bit(64:127)=f17(0:63)
+ *                                             Bit(0:63)=f16(0:63)
+ *    All other fields unused...
+ */
+
+static int
+save_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
+{
+	struct task_struct *tsk = current;
+	struct pt_regs *ptp;
+	struct _fpreg_ia32 *fpregp;
+	char buf[32];
+	unsigned long fsr, fcr, fir, fdr;
+	unsigned long new_fsr;
+	unsigned long num128[2];
+	unsigned long mxcsr=0;
+	int fp_tos, fr8_st_map;
+
+	if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
+		return -EFAULT;
+
+	/* Read in fsr, fcr, fir, fdr and copy onto fpstate */
+	fsr = ia64_getreg(_IA64_REG_AR_FSR);
+	fcr = ia64_getreg(_IA64_REG_AR_FCR);
+	fir = ia64_getreg(_IA64_REG_AR_FIR);
+	fdr = ia64_getreg(_IA64_REG_AR_FDR);
+
+	/*
+	 * We need to clear the exception state before calling the signal handler.  Clear
+	 * bit 15 and bits 0-7 in the fp status word, similar to what the fnclex
+	 * instruction does.
+	 */
+	new_fsr = fsr & ~0x80ff;
+	ia64_setreg(_IA64_REG_AR_FSR, new_fsr);
+
+	__put_user(fcr & 0xffff, &save->cw);
+	__put_user(fsr & 0xffff, &save->sw);
+	__put_user((fsr>>16) & 0xffff, &save->tag);
+	__put_user(fir, &save->ipoff);
+	__put_user((fir>>32) & 0xffff, &save->cssel);
+	__put_user(fdr, &save->dataoff);
+	__put_user((fdr>>32) & 0xffff, &save->datasel);
+	__put_user(fsr & 0xffff, &save->status);
+
+	mxcsr = ((fcr>>32) & 0xff80) | ((fsr>>32) & 0x3f);
+	__put_user(mxcsr & 0xffff, &save->mxcsr);
+	__put_user(0, &save->magic);	/* X86_FXSR_MAGIC == 0x0000 */
+
+	/*
+	 * save f8..f11  from pt_regs
+	 * save f12..f15 from live register set
+	 */
+	/*
+	 *  Find the location where f8 has to go in the fp reg stack.  This depends on
+	 *  the TOP(11:13) field of sw.  The other fp registers continue sequentially
+	 *  from wherever f8 maps to.
+	 */
+	fp_tos = (fsr>>11)&0x7;
+	fr8_st_map = (8-fp_tos)&0x7;
+	ptp = ia64_task_regs(tsk);
+	fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
+	ia64f2ia32f(fpregp, &ptp->f8);
+	copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
+	ia64f2ia32f(fpregp, &ptp->f9);
+	copy_to_user(&save->_st[(1+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
+	ia64f2ia32f(fpregp, &ptp->f10);
+	copy_to_user(&save->_st[(2+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
+	ia64f2ia32f(fpregp, &ptp->f11);
+	copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
+
+	ia64_stfe(fpregp, 12);
+	copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
+	ia64_stfe(fpregp, 13);
+	copy_to_user(&save->_st[(5+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
+	ia64_stfe(fpregp, 14);
+	copy_to_user(&save->_st[(6+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
+	ia64_stfe(fpregp, 15);
+	copy_to_user(&save->_st[(7+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
+
+	ia64_stf8(&num128[0], 16);
+	ia64_stf8(&num128[1], 17);
+	copy_to_user(&save->_xmm[0], num128, sizeof(struct _xmmreg_ia32));
+
+	ia64_stf8(&num128[0], 18);
+	ia64_stf8(&num128[1], 19);
+	copy_to_user(&save->_xmm[1], num128, sizeof(struct _xmmreg_ia32));
+
+	ia64_stf8(&num128[0], 20);
+	ia64_stf8(&num128[1], 21);
+	copy_to_user(&save->_xmm[2], num128, sizeof(struct _xmmreg_ia32));
+
+	ia64_stf8(&num128[0], 22);
+	ia64_stf8(&num128[1], 23);
+	copy_to_user(&save->_xmm[3], num128, sizeof(struct _xmmreg_ia32));
+
+	ia64_stf8(&num128[0], 24);
+	ia64_stf8(&num128[1], 25);
+	copy_to_user(&save->_xmm[4], num128, sizeof(struct _xmmreg_ia32));
+
+	ia64_stf8(&num128[0], 26);
+	ia64_stf8(&num128[1], 27);
+	copy_to_user(&save->_xmm[5], num128, sizeof(struct _xmmreg_ia32));
+
+	ia64_stf8(&num128[0], 28);
+	ia64_stf8(&num128[1], 29);
+	copy_to_user(&save->_xmm[6], num128, sizeof(struct _xmmreg_ia32));
+
+	ia64_stf8(&num128[0], 30);
+	ia64_stf8(&num128[1], 31);
+	copy_to_user(&save->_xmm[7], num128, sizeof(struct _xmmreg_ia32));
+	return 0;
+}
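+
+/*
+ * Index-math example for the save path above (hypothetical TOS): with
+ * sw.top = 5, fr8_st_map = (8-5)&7 = 3, so f8 is written to _st[3], f9 to
+ * _st[4], ..., and f15 wraps around to _st[(7+3)&7] = _st[2].
+ */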
+
+static int
+restore_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
+{
+	struct task_struct *tsk = current;
+	struct pt_regs *ptp;
+	unsigned int lo, hi;
+	unsigned long num128[2];
+	unsigned long num64, mxcsr;
+	struct _fpreg_ia32 *fpregp;
+	char buf[32];
+	unsigned long fsr, fcr, fir, fdr;
+	int fp_tos, fr8_st_map;
+
+	if (!access_ok(VERIFY_READ, save, sizeof(*save)))
+		return(-EFAULT);
+
+	/*
+	 * Updating fsr, fcr, fir, fdr.
+	 * Just a bit more complicated than save.
+	 * - Need to make sure that we don't write any value other than the
+	 *   specific fpstate info
+	 * - Need to make sure that the untouched part of frs, fdr, fir, fcr
+	 *   should remain same while writing.
+	 * So, we do a read, change specific fields and write.
+	 */
+	fsr = ia64_getreg(_IA64_REG_AR_FSR);
+	fcr = ia64_getreg(_IA64_REG_AR_FCR);
+	fir = ia64_getreg(_IA64_REG_AR_FIR);
+	fdr = ia64_getreg(_IA64_REG_AR_FDR);
+
+	__get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
+	/* setting bits 0..5, 8..12 with cw and 39..47 from mxcsr */
+	__get_user(lo, (unsigned int __user *)&save->cw);
+	num64 = mxcsr & 0xff10;
+	num64 = (num64 << 32) | (lo & 0x1f3f);
+	fcr = (fcr & (~0xff1000001f3fUL)) | num64;
+
+	/* setting bits 0..31 with sw and tag and 32..37 from mxcsr */
+	__get_user(lo, (unsigned int __user *)&save->sw);
+	/* clear bits 15,7 (fsw.b, fsw.es) when no exception is pending, to reflect the current error status */
+	if ( !(lo & 0x7f) )
+		lo &= (~0x8080);
+	__get_user(hi, (unsigned int __user *)&save->tag);
+	num64 = mxcsr & 0x3f;
+	num64 = (num64 << 16) | (hi & 0xffff);
+	num64 = (num64 << 16) | (lo & 0xffff);
+	fsr = (fsr & (~0x3fffffffffUL)) | num64;
+
+	/* setting bits 0..47 with cssel and ipoff */
+	__get_user(lo, (unsigned int __user *)&save->ipoff);
+	__get_user(hi, (unsigned int __user *)&save->cssel);
+	num64 = hi & 0xffff;
+	num64 = (num64 << 32) | lo;
+	fir = (fir & (~0xffffffffffffUL)) | num64;
+
+	/* setting bits 0..47 with datasel and dataoff */
+	__get_user(lo, (unsigned int __user *)&save->dataoff);
+	__get_user(hi, (unsigned int __user *)&save->datasel);
+	num64 = hi & 0xffff;
+	num64 = (num64 << 32) | lo;
+	fdr = (fdr & (~0xffffffffffffUL)) | num64;
+
+	ia64_setreg(_IA64_REG_AR_FSR, fsr);
+	ia64_setreg(_IA64_REG_AR_FCR, fcr);
+	ia64_setreg(_IA64_REG_AR_FIR, fir);
+	ia64_setreg(_IA64_REG_AR_FDR, fdr);
+
+	/*
+	 * restore f8..f11 onto pt_regs
+	 * restore f12..f15 onto live registers
+	 */
+	/*
+	 *  Find the location where f8 has to go in the fp reg stack.  This depends on
+	 *  the TOP(11:13) field of sw.  The other fp registers continue sequentially
+	 *  from wherever f8 maps to.
+	 */
+	fp_tos = (fsr>>11)&0x7;
+	fr8_st_map = (8-fp_tos)&0x7;
+	fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
+
+	ptp = ia64_task_regs(tsk);
+	copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
+	ia32f2ia64f(&ptp->f8, fpregp);
+	copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
+	ia32f2ia64f(&ptp->f9, fpregp);
+	copy_from_user(fpregp, &save->_st[(2+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
+	ia32f2ia64f(&ptp->f10, fpregp);
+	copy_from_user(fpregp, &save->_st[(3+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
+	ia32f2ia64f(&ptp->f11, fpregp);
+
+	copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
+	ia64_ldfe(12, fpregp);
+	copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
+	ia64_ldfe(13, fpregp);
+	copy_from_user(fpregp, &save->_st[(6+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
+	ia64_ldfe(14, fpregp);
+	copy_from_user(fpregp, &save->_st[(7+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
+	ia64_ldfe(15, fpregp);
+
+	copy_from_user(num128, &save->_xmm[0], sizeof(struct _xmmreg_ia32));
+	ia64_ldf8(16, &num128[0]);
+	ia64_ldf8(17, &num128[1]);
+
+	copy_from_user(num128, &save->_xmm[1], sizeof(struct _xmmreg_ia32));
+	ia64_ldf8(18, &num128[0]);
+	ia64_ldf8(19, &num128[1]);
+
+	copy_from_user(num128, &save->_xmm[2], sizeof(struct _xmmreg_ia32));
+	ia64_ldf8(20, &num128[0]);
+	ia64_ldf8(21, &num128[1]);
+
+	copy_from_user(num128, &save->_xmm[3], sizeof(struct _xmmreg_ia32));
+	ia64_ldf8(22, &num128[0]);
+	ia64_ldf8(23, &num128[1]);
+
+	copy_from_user(num128, &save->_xmm[4], sizeof(struct _xmmreg_ia32));
+	ia64_ldf8(24, &num128[0]);
+	ia64_ldf8(25, &num128[1]);
+
+	copy_from_user(num128, &save->_xmm[5], sizeof(struct _xmmreg_ia32));
+	ia64_ldf8(26, &num128[0]);
+	ia64_ldf8(27, &num128[1]);
+
+	copy_from_user(num128, &save->_xmm[6], sizeof(struct _xmmreg_ia32));
+	ia64_ldf8(28, &num128[0]);
+	ia64_ldf8(29, &num128[1]);
+
+	copy_from_user(num128, &save->_xmm[7], sizeof(struct _xmmreg_ia32));
+	ia64_ldf8(30, &num128[0]);
+	ia64_ldf8(31, &num128[1]);
+	return 0;
+}
+
+static inline void
+sigact_set_handler (struct k_sigaction *sa, unsigned int handler, unsigned int restorer)
+{
+	if (handler + 1 <= 2)
+		/* SIG_DFL, SIG_IGN, or SIG_ERR: must sign-extend to 64-bits */
+		sa->sa.sa_handler = (__sighandler_t) A((int) handler);
+	else
+		sa->sa.sa_handler = (__sighandler_t) (((unsigned long) restorer << 32) | handler);
+}
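+
+/*
+ * Packing example (hypothetical addresses): handler 0x08049000 with
+ * restorer 0x0804f000 is stored as sa_handler 0x0804f00008049000; the
+ * IA32_SA_HANDLER()/IA32_SA_RESTORER() accessors used below unpack the low
+ * and high 32-bit halves again.  The "handler + 1 <= 2" test catches
+ * SIG_DFL (0), SIG_IGN (1), and SIG_ERR (-1), which must stay sign-extended.
+ */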
+
+long
+__ia32_rt_sigsuspend (compat_sigset_t *sset, unsigned int sigsetsize, struct sigscratch *scr)
+{
+	extern long ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall);
+	sigset_t oldset, set;
+
+	scr->scratch_unat = 0;	/* avoid leaking kernel bits to user level */
+	memset(&set, 0, sizeof(set));
+
+	/* sset is a kernel-space copy here (see the callers), so a plain memcpy is right */
+	memcpy(&set.sig, &sset->sig, sigsetsize);
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+
+	spin_lock_irq(&current->sighand->siglock);
+	{
+		oldset = current->blocked;
+		current->blocked = set;
+		recalc_sigpending();
+	}
+	spin_unlock_irq(&current->sighand->siglock);
+
+	/*
+	 * The return below usually returns to the signal handler.  We need to pre-set the
+	 * correct error code here to ensure that the right values get saved in sigcontext
+	 * by ia64_do_signal.
+	 */
+	scr->pt.r8 = -EINTR;
+	while (1) {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule();
+		if (ia64_do_signal(&oldset, scr, 1))
+			return -EINTR;
+	}
+}
+
+asmlinkage long
+ia32_rt_sigsuspend (compat_sigset_t __user *uset, unsigned int sigsetsize, struct sigscratch *scr)
+{
+	compat_sigset_t set;
+
+	if (sigsetsize > sizeof(compat_sigset_t))
+		return -EINVAL;
+
+	if (copy_from_user(&set.sig, &uset->sig, sigsetsize))
+		return -EFAULT;
+
+	return __ia32_rt_sigsuspend(&set, sigsetsize, scr);
+}
+
+asmlinkage long
+ia32_sigsuspend (unsigned int mask, struct sigscratch *scr)
+{
+	return __ia32_rt_sigsuspend((compat_sigset_t *) &mask, sizeof(mask), scr);
+}
+
+asmlinkage long
+sys32_signal (int sig, unsigned int handler)
+{
+	struct k_sigaction new_sa, old_sa;
+	int ret;
+
+	sigact_set_handler(&new_sa, handler, 0);
+	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
+
+	ret = do_sigaction(sig, &new_sa, &old_sa);
+
+	return ret ? ret : IA32_SA_HANDLER(&old_sa);
+}
+
+asmlinkage long
+sys32_rt_sigaction (int sig, struct sigaction32 __user *act,
+		    struct sigaction32 __user *oact, unsigned int sigsetsize)
+{
+	struct k_sigaction new_ka, old_ka;
+	unsigned int handler, restorer;
+	int ret;
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(compat_sigset_t))
+		return -EINVAL;
+
+	if (act) {
+		ret = get_user(handler, &act->sa_handler);
+		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
+		ret |= get_user(restorer, &act->sa_restorer);
+		ret |= copy_from_user(&new_ka.sa.sa_mask, &act->sa_mask, sizeof(compat_sigset_t));
+		if (ret)
+			return -EFAULT;
+
+		sigact_set_handler(&new_ka, handler, restorer);
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler);
+		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+		ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer);
+		ret |= copy_to_user(&oact->sa_mask, &old_ka.sa.sa_mask, sizeof(compat_sigset_t));
+	}
+	return ret;
+}
+
+
+asmlinkage long
+sys32_rt_sigprocmask (int how, compat_sigset_t __user *set, compat_sigset_t __user *oset,
+		      unsigned int sigsetsize)
+{
+	mm_segment_t old_fs = get_fs();
+	sigset_t s;
+	long ret;
+
+	if (sigsetsize > sizeof(s))
+		return -EINVAL;
+
+	if (set) {
+		memset(&s, 0, sizeof(s));
+		if (copy_from_user(&s.sig, set, sigsetsize))
+			return -EFAULT;
+	}
+	set_fs(KERNEL_DS);
+	ret = sys_rt_sigprocmask(how,
+				 set ? (sigset_t __user *) &s : NULL,
+				 oset ? (sigset_t __user *) &s : NULL, sizeof(s));
+	set_fs(old_fs);
+	if (ret)
+		return ret;
+	if (oset) {
+		if (copy_to_user(oset, &s.sig, sigsetsize))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+asmlinkage long
+sys32_rt_sigqueueinfo (int pid, int sig, compat_siginfo_t __user *uinfo)
+{
+	mm_segment_t old_fs = get_fs();
+	siginfo_t info;
+	int ret;
+
+	if (copy_siginfo_from_user32(&info, uinfo))
+		return -EFAULT;
+	set_fs(KERNEL_DS);
+	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
+	set_fs(old_fs);
+	return ret;
+}
+
+asmlinkage long
+sys32_sigaction (int sig, struct old_sigaction32 __user *act, struct old_sigaction32 __user *oact)
+{
+	struct k_sigaction new_ka, old_ka;
+	unsigned int handler, restorer;
+	int ret;
+
+	if (act) {
+		compat_old_sigset_t mask;
+
+		ret = get_user(handler, &act->sa_handler);
+		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
+		ret |= get_user(restorer, &act->sa_restorer);
+		ret |= get_user(mask, &act->sa_mask);
+		if (ret)
+			return ret;
+
+		sigact_set_handler(&new_ka, handler, restorer);
+		siginitset(&new_ka.sa.sa_mask, mask);
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler);
+		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+		ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer);
+		ret |= put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+	}
+
+	return ret;
+}
+
+static int
+setup_sigcontext_ia32 (struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __user *fpstate,
+		       struct pt_regs *regs, unsigned long mask)
+{
+	int  err = 0;
+	unsigned long flag;
+
+	if (!access_ok(VERIFY_WRITE, sc, sizeof(*sc)))
+		return -EFAULT;
+
+	err |= __put_user((regs->r16 >> 32) & 0xffff, (unsigned int __user *)&sc->fs);
+	err |= __put_user((regs->r16 >> 48) & 0xffff, (unsigned int __user *)&sc->gs);
+	err |= __put_user((regs->r16 >> 16) & 0xffff, (unsigned int __user *)&sc->es);
+	err |= __put_user(regs->r16 & 0xffff, (unsigned int __user *)&sc->ds);
+	err |= __put_user(regs->r15, &sc->edi);
+	err |= __put_user(regs->r14, &sc->esi);
+	err |= __put_user(regs->r13, &sc->ebp);
+	err |= __put_user(regs->r12, &sc->esp);
+	err |= __put_user(regs->r11, &sc->ebx);
+	err |= __put_user(regs->r10, &sc->edx);
+	err |= __put_user(regs->r9, &sc->ecx);
+	err |= __put_user(regs->r8, &sc->eax);
+#if 0
+	err |= __put_user(current->tss.trap_no, &sc->trapno);
+	err |= __put_user(current->tss.error_code, &sc->err);
+#endif
+	err |= __put_user(regs->cr_iip, &sc->eip);
+	err |= __put_user(regs->r17 & 0xffff, (unsigned int __user *)&sc->cs);
+	/*
+	 *  `eflags' is in an ar register for this context
+	 */
+	flag = ia64_getreg(_IA64_REG_AR_EFLAG);
+	err |= __put_user((unsigned int)flag, &sc->eflags);
+	err |= __put_user(regs->r12, &sc->esp_at_signal);
+	err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int __user *)&sc->ss);
+
+	if ( save_ia32_fpstate_live(fpstate) < 0 )
+		err = -EFAULT;
+	else
+		err |= __put_user((u32)(u64)fpstate, &sc->fpstate);
+
+#if 0
+	tmp = save_i387(fpstate);
+	if (tmp < 0)
+		err = 1;
+	else
+		err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
+
+	/* non-iBCS2 extensions.. */
+#endif
+	err |= __put_user(mask, &sc->oldmask);
+#if 0
+	err |= __put_user(current->tss.cr2, &sc->cr2);
+#endif
+	return err;
+}
+
+static int
+restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 __user *sc, int *peax)
+{
+	unsigned int err = 0;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
+		return(-EFAULT);
+
+#define COPY(ia64x, ia32x)	err |= __get_user(regs->ia64x, &sc->ia32x)
+
+#define copyseg_gs(tmp)		(regs->r16 |= (unsigned long) (tmp) << 48)
+#define copyseg_fs(tmp)		(regs->r16 |= (unsigned long) (tmp) << 32)
+#define copyseg_cs(tmp)		(regs->r17 |= tmp)
+#define copyseg_ss(tmp)		(regs->r17 |= (unsigned long) (tmp) << 16)
+#define copyseg_es(tmp)		(regs->r16 |= (unsigned long) (tmp) << 16)
+#define copyseg_ds(tmp)		(regs->r16 |= tmp)
+
+#define COPY_SEG(seg)					\
+	{						\
+		unsigned short tmp;			\
+		err |= __get_user(tmp, &sc->seg);	\
+		copyseg_##seg(tmp);			\
+	}
+#define COPY_SEG_STRICT(seg)				\
+	{						\
+		unsigned short tmp;			\
+		err |= __get_user(tmp, &sc->seg);	\
+		copyseg_##seg(tmp|3);			\
+	}
+
+	/* To make COPY_SEGs easier, we zero r16, r17 */
+	regs->r16 = 0;
+	regs->r17 = 0;
+
+	COPY_SEG(gs);
+	COPY_SEG(fs);
+	COPY_SEG(es);
+	COPY_SEG(ds);
+	COPY(r15, edi);
+	COPY(r14, esi);
+	COPY(r13, ebp);
+	COPY(r12, esp);
+	COPY(r11, ebx);
+	COPY(r10, edx);
+	COPY(r9, ecx);
+	COPY(cr_iip, eip);
+	COPY_SEG_STRICT(cs);
+	COPY_SEG_STRICT(ss);
+	ia32_load_segment_descriptors(current);
+	{
+		unsigned int tmpflags;
+		unsigned long flag;
+
+		/*
+		 *  IA32 `eflags' is not part of `pt_regs', it's in an ar register which
+		 *  is part of the thread context.  Fortunately, we are executing in the
+		 *  IA32 process's context.
+		 */
+		err |= __get_user(tmpflags, &sc->eflags);
+		flag = ia64_getreg(_IA64_REG_AR_EFLAG);
+		flag &= ~0x40DD5;
+		flag |= (tmpflags & 0x40DD5);
+		ia64_setreg(_IA64_REG_AR_EFLAG, flag);
+
+		regs->r1 = -1;	/* disable syscall checks, r1 is orig_eax */
+	}
+
+	{
+		struct _fpstate_ia32 __user *buf = NULL;
+		u32    fpstate_ptr;
+		err |= get_user(fpstate_ptr, &(sc->fpstate));
+		buf = compat_ptr(fpstate_ptr);
+		if (buf) {
+			err |= restore_ia32_fpstate_live(buf);
+		}
+	}
+
+#if 0
+	{
+		struct _fpstate * buf;
+		err |= __get_user(buf, &sc->fpstate);
+		if (buf) {
+			if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
+				goto badframe;
+			err |= restore_i387(buf);
+		}
+	}
+#endif
+
+	err |= __get_user(*peax, &sc->eax);
+	return err;
+
+#if 0
+  badframe:
+	return 1;
+#endif
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *
+get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
+{
+	unsigned long esp;
+
+	/* Default to using the normal stack (truncate off the sign-extension of bit 31): */
+	esp = (unsigned int) regs->r12;
+
+	/* This is the X/Open sanctioned signal stack switching.  */
+	if (ka->sa.sa_flags & SA_ONSTACK) {
+		if (!on_sig_stack(esp))
+			esp = current->sas_ss_sp + current->sas_ss_size;
+	}
+	/* Legacy stack switching not supported */
+
+	return (void __user *)((esp - frame_size) & -8ul);
+}
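+
+/*
+ * For example (hypothetical values): with frame_size == 0x220 and
+ * esp == 0xbffff8f4, and no SA_ONSTACK switch, the frame lands at
+ * (0xbffff8f4 - 0x220) & -8ul == 0xbffff6d0, i.e. below the current
+ * stack pointer, rounded down to an 8-byte boundary.
+ */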
+
+static int
+setup_frame_ia32 (int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs)
+{
+	struct exec_domain *ed = current_thread_info()->exec_domain;
+	struct sigframe_ia32 __user *frame;
+	int err = 0;
+
+	frame = get_sigframe(ka, regs, sizeof(*frame));
+
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+		goto give_sigsegv;
+
+	err |= __put_user((ed && ed->signal_invmap && sig < 32
+			   ? (int)(ed->signal_invmap[sig]) : sig), &frame->sig);
+
+	err |= setup_sigcontext_ia32(&frame->sc, &frame->fpstate, regs, set->sig[0]);
+
+	if (_COMPAT_NSIG_WORDS > 1)
+		err |= __copy_to_user(frame->extramask, (char *) &set->sig + 4,
+				      sizeof(frame->extramask));
+
+	/* Set up to return from userspace.  If provided, use a stub
+	   already in userspace.  */
+	if (ka->sa.sa_flags & SA_RESTORER) {
+		unsigned int restorer = IA32_SA_RESTORER(ka);
+		err |= __put_user(restorer, &frame->pretcode);
+	} else {
+		/* Pointing to restorer in ia32 gate page */
+		err |= __put_user(IA32_GATE_OFFSET, &frame->pretcode);
+	}
+
+	/* This is popl %eax ; movl $,%eax ; int $0x80
+	 * and is there for historical reasons only.
+	 * See arch/i386/kernel/signal.c
+	 */
+
+	err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
+	err |= __put_user(__IA32_NR_sigreturn, (int __user *)(frame->retcode+2));
+	err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
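+
+	/*
+	 * Byte for byte, the little-endian stores above lay the stub out as:
+	 *
+	 *	retcode[0]    = 0x58	popl %eax
+	 *	retcode[1]    = 0xb8	movl $__IA32_NR_sigreturn,%eax
+	 *	retcode[2..5] = __IA32_NR_sigreturn (imm32)
+	 *	retcode[6]    = 0xcd	int $0x80
+	 *	retcode[7]    = 0x80
+	 */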
+
+	if (err)
+		goto give_sigsegv;
+
+	/* Set up registers for signal handler */
+	regs->r12 = (unsigned long) frame;
+	regs->cr_iip = IA32_SA_HANDLER(ka);
+
+	set_fs(USER_DS);
+
+#if 0
+	regs->eflags &= ~TF_MASK;
+#endif
+
+#if 0
+	printk("SIG deliver (%s:%d): sig=%d sp=%p pc=%lx ra=%x\n",
+               current->comm, current->pid, sig, (void *) frame, regs->cr_iip, frame->pretcode);
+#endif
+
+	return 1;
+
+  give_sigsegv:
+	force_sigsegv(sig, current);
+	return 0;
+}
+
+static int
+setup_rt_frame_ia32 (int sig, struct k_sigaction *ka, siginfo_t *info,
+		     sigset_t *set, struct pt_regs * regs)
+{
+	struct exec_domain *ed = current_thread_info()->exec_domain;
+	compat_uptr_t pinfo, puc;
+	struct rt_sigframe_ia32 __user *frame;
+	int err = 0;
+
+	frame = get_sigframe(ka, regs, sizeof(*frame));
+
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+		goto give_sigsegv;
+
+	err |= __put_user((ed && ed->signal_invmap
+			   && sig < 32 ? ed->signal_invmap[sig] : sig), &frame->sig);
+
+	pinfo = (long __user) &frame->info;
+	puc = (long __user) &frame->uc;
+	err |= __put_user(pinfo, &frame->pinfo);
+	err |= __put_user(puc, &frame->puc);
+	err |= copy_siginfo_to_user32(&frame->info, info);
+
+	/* Create the ucontext.  */
+	err |= __put_user(0, &frame->uc.uc_flags);
+	err |= __put_user(0, &frame->uc.uc_link);
+	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+	err |= __put_user(sas_ss_flags(regs->r12), &frame->uc.uc_stack.ss_flags);
+	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+	err |= setup_sigcontext_ia32(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+	if (err)
+		goto give_sigsegv;
+
+	/* Set up to return from userspace.  If provided, use a stub
+	   already in userspace.  */
+	if (ka->sa.sa_flags & SA_RESTORER) {
+		unsigned int restorer = IA32_SA_RESTORER(ka);
+		err |= __put_user(restorer, &frame->pretcode);
+	} else {
+		/* Pointing to rt_restorer in ia32 gate page */
+		err |= __put_user(IA32_GATE_OFFSET + 8, &frame->pretcode);
+	}
+
+	/* This is movl $,%eax ; int $0x80
+	 * and is there for historical reasons only.
+	 * See arch/i386/kernel/signal.c
+	 */
+
+	err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
+	err |= __put_user(__IA32_NR_rt_sigreturn, (int __user *)(frame->retcode+1));
+	err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
+
+	if (err)
+		goto give_sigsegv;
+
+	/* Set up registers for signal handler */
+	regs->r12 = (unsigned long) frame;
+	regs->cr_iip = IA32_SA_HANDLER(ka);
+
+	set_fs(USER_DS);
+
+#if 0
+	regs->eflags &= ~TF_MASK;
+#endif
+
+#if 0
+	printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%x\n",
+               current->comm, current->pid, (void *) frame, regs->cr_iip, frame->pretcode);
+#endif
+
+	return 1;
+
+give_sigsegv:
+	force_sigsegv(sig, current);
+	return 0;
+}
+
+int
+ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
+		   sigset_t *set, struct pt_regs *regs)
+{
+	/* Set up the stack frame */
+	if (ka->sa.sa_flags & SA_SIGINFO)
+		return setup_rt_frame_ia32(sig, ka, info, set, regs);
+	else
+		return setup_frame_ia32(sig, ka, set, regs);
+}
+
+asmlinkage long
+sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5,
+		 int arg6, int arg7, struct pt_regs regs)
+{
+	unsigned long esp = (unsigned int) regs.r12;
+	struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(esp - 8);
+	sigset_t set;
+	int eax;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+
+	if (__get_user(set.sig[0], &frame->sc.oldmask)
+	    || (_COMPAT_NSIG_WORDS > 1 && __copy_from_user((char *) &set.sig + 4, &frame->extramask,
+							 sizeof(frame->extramask))))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	if (restore_sigcontext_ia32(&regs, &frame->sc, &eax))
+		goto badframe;
+	return eax;
+
+  badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+asmlinkage long
+sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4,
+		    int arg5, int arg6, int arg7, struct pt_regs regs)
+{
+	unsigned long esp = (unsigned int) regs.r12;
+	struct rt_sigframe_ia32 __user *frame = (struct rt_sigframe_ia32 __user *)(esp - 4);
+	sigset_t set;
+	int eax;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked =  set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	if (restore_sigcontext_ia32(&regs, &frame->uc.uc_mcontext, &eax))
+		goto badframe;
+
+	/* It is more difficult to avoid calling this function than to
+	   call it and ignore errors.  */
+	do_sigaltstack((stack_t __user *) &frame->uc.uc_stack, NULL, esp);
+
+	return eax;
+
+  badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
new file mode 100644
index 0000000..4f63004
--- /dev/null
+++ b/arch/ia64/ia32/ia32_support.c
@@ -0,0 +1,264 @@
+/*
+ * IA32 helper functions
+ *
+ * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
+ * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 2001-2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 thread context
+ * 02/19/01	D. Mosberger	dropped tssd; it's not needed
+ * 09/14/01	D. Mosberger	fixed memory management for gdt/tss page
+ * 09/29/01	D. Mosberger	added ia32_load_segment_descriptors()
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/personality.h>
+#include <linux/sched.h>
+
+#include <asm/intrinsics.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+
+#include "ia32priv.h"
+
+extern void die_if_kernel (char *str, struct pt_regs *regs, long err);
+
+struct exec_domain ia32_exec_domain;
+struct page *ia32_shared_page[NR_CPUS];
+unsigned long *ia32_boot_gdt;
+unsigned long *cpu_gdt_table[NR_CPUS];
+struct page *ia32_gate_page;
+
+static unsigned long
+load_desc (u16 selector)
+{
+	unsigned long *table, limit, index;
+
+	if (!selector)
+		return 0;
+	if (selector & IA32_SEGSEL_TI) {
+		table = (unsigned long *) IA32_LDT_OFFSET;
+		limit = IA32_LDT_ENTRIES;
+	} else {
+		table = cpu_gdt_table[smp_processor_id()];
+		limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]);
+	}
+	index = selector >> IA32_SEGSEL_INDEX_SHIFT;
+	if (index >= limit)
+		return 0;
+	return IA32_SEG_UNSCRAMBLE(table[index]);
+}
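+
+/*
+ * An IA-32 selector decomposes as in the IA32_SEGSEL_* macros of ia32priv.h:
+ * bits 1:0 are the RPL, bit 2 (TI) picks the LDT over the GDT, and bits 15:3
+ * index the chosen table.  E.g. a (hypothetical) selector of 0x2f has RPL 3,
+ * TI set (LDT), and index 5.
+ */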
+
+void
+ia32_load_segment_descriptors (struct task_struct *task)
+{
+	struct pt_regs *regs = ia64_task_regs(task);
+
+	/* Setup the segment descriptors */
+	regs->r24 = load_desc(regs->r16 >> 16);		/* ESD */
+	regs->r27 = load_desc(regs->r16 >>  0);		/* DSD */
+	regs->r28 = load_desc(regs->r16 >> 32);		/* FSD */
+	regs->r29 = load_desc(regs->r16 >> 48);		/* GSD */
+	regs->ar_csd = load_desc(regs->r17 >>  0);	/* CSD */
+	regs->ar_ssd = load_desc(regs->r17 >> 16);	/* SSD */
+}
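+
+/*
+ * As the shifts above imply, the data segment selectors are packed 16 bits
+ * apart in r16 (ds at bits 15:0, es at 31:16, fs at 47:32, gs at 63:48),
+ * while r17 holds cs at bits 15:0 and ss at 31:16 (with the LDT and TSS
+ * selectors above them; see ia32_load_state() below).  The unscrambled
+ * descriptors themselves live in r24/r27/r28/r29 and ar.csd/ar.ssd.
+ */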
+
+int
+ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
+{
+	struct desc_struct *desc;
+	struct ia32_user_desc info;
+	int idx;
+
+	if (copy_from_user(&info, (void __user *)(childregs->r14 & 0xffffffff), sizeof(info)))
+		return -EFAULT;
+	if (LDT_empty(&info))
+		return -EINVAL;
+
+	idx = info.entry_number;
+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+		return -EINVAL;
+
+	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+	desc->a = LDT_entry_a(&info);
+	desc->b = LDT_entry_b(&info);
+
+	/* XXX: can this be done in a cleaner way? */
+	load_TLS(&child->thread, smp_processor_id());
+	ia32_load_segment_descriptors(child);
+	load_TLS(&current->thread, smp_processor_id());
+
+	return 0;
+}
+
+void
+ia32_save_state (struct task_struct *t)
+{
+	t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
+	t->thread.fsr   = ia64_getreg(_IA64_REG_AR_FSR);
+	t->thread.fcr   = ia64_getreg(_IA64_REG_AR_FCR);
+	t->thread.fir   = ia64_getreg(_IA64_REG_AR_FIR);
+	t->thread.fdr   = ia64_getreg(_IA64_REG_AR_FDR);
+	ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
+	ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
+}
+
+void
+ia32_load_state (struct task_struct *t)
+{
+	unsigned long eflag, fsr, fcr, fir, fdr, tssd;
+	struct pt_regs *regs = ia64_task_regs(t);
+
+	eflag = t->thread.eflag;
+	fsr = t->thread.fsr;
+	fcr = t->thread.fcr;
+	fir = t->thread.fir;
+	fdr = t->thread.fdr;
+	tssd = load_desc(_TSS);					/* TSSD */
+
+	ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
+	ia64_setreg(_IA64_REG_AR_FSR, fsr);
+	ia64_setreg(_IA64_REG_AR_FCR, fcr);
+	ia64_setreg(_IA64_REG_AR_FIR, fir);
+	ia64_setreg(_IA64_REG_AR_FDR, fdr);
+	current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
+	current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
+	ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
+	ia64_set_kr(IA64_KR_TSSD, tssd);
+
+	regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17;
+	regs->r30 = load_desc(_LDT);				/* LDTD */
+	load_TLS(&t->thread, smp_processor_id());
+}
+
+/*
+ * Setup the per-CPU IA32 GDT
+ */
+void
+ia32_gdt_init (void)
+{
+	int cpu = smp_processor_id();
+
+	ia32_shared_page[cpu] = alloc_page(GFP_KERNEL);
+	if (!ia32_shared_page[cpu])
+		panic("failed to allocate ia32_shared_page[%d]\n", cpu);
+
+	cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]);
+
+	/* Copy from the boot cpu's GDT */
+	memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE);
+}
+
+
+/*
+ * Setup the boot CPU's IA32 GDT, including the TSS and LDT descriptors
+ */
+static void
+ia32_boot_gdt_init (void)
+{
+	unsigned long ldt_size;
+
+	ia32_shared_page[0] = alloc_page(GFP_KERNEL);
+	if (!ia32_shared_page[0])
+		panic("failed to allocate ia32_shared_page[0]\n");
+
+	ia32_boot_gdt = page_address(ia32_shared_page[0]);
+	cpu_gdt_table[0] = ia32_boot_gdt;
+
+	/* CS descriptor in IA-32 (scrambled) format */
+	ia32_boot_gdt[__USER_CS >> 3]
+		= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
+				      0xb, 1, 3, 1, 1, 1, 1);
+
+	/* DS descriptor in IA-32 (scrambled) format */
+	ia32_boot_gdt[__USER_DS >> 3]
+		= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
+				      0x3, 1, 3, 1, 1, 1, 1);
+
+	ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
+	ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
+						       0xb, 0, 3, 1, 1, 1, 0);
+	ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
+						       0x2, 0, 3, 1, 1, 1, 0);
+}
+
+static void
+ia32_gate_page_init(void)
+{
+	unsigned long *sr;
+
+	ia32_gate_page = alloc_page(GFP_KERNEL);
+	sr = page_address(ia32_gate_page);
+	/* This is popl %eax ; movl $,%eax ; int $0x80 */
+	*sr++ = 0xb858 | (__IA32_NR_sigreturn << 16) | (0x80cdUL << 48);
+
+	/* This is movl $,%eax ; int $0x80 */
+	*sr = 0xb8 | (__IA32_NR_rt_sigreturn << 8) | (0x80cdUL << 40);
+}
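+
+/*
+ * Note the resulting layout: the 8-byte sigreturn stub sits at the start of
+ * the gate page (mapped at IA32_GATE_OFFSET) with the rt_sigreturn stub
+ * right after it, which is why setup_frame_ia32() and setup_rt_frame_ia32()
+ * point pretcode at IA32_GATE_OFFSET and IA32_GATE_OFFSET + 8, respectively.
+ */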
+
+void
+ia32_mem_init(void)
+{
+	ia32_boot_gdt_init();
+	ia32_gate_page_init();
+}
+
+/*
+ * Handle bad IA32 interrupt via syscall
+ */
+void
+ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
+{
+	siginfo_t siginfo;
+
+	die_if_kernel("Bad IA-32 interrupt", regs, int_num);
+
+	siginfo.si_signo = SIGTRAP;
+	siginfo.si_errno = int_num;	/* XXX is it OK to abuse si_errno like this? */
+	siginfo.si_flags = 0;
+	siginfo.si_isr = 0;
+	siginfo.si_addr = NULL;
+	siginfo.si_imm = 0;
+	siginfo.si_code = TRAP_BRKPT;
+	force_sig_info(SIGTRAP, &siginfo, current);
+}
+
+void
+ia32_cpu_init (void)
+{
+	/* initialize global ia32 state - CR0 and CR4 */
+	ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0));
+}
+
+static int __init
+ia32_init (void)
+{
+	ia32_exec_domain.name = "Linux/x86";
+	ia32_exec_domain.handler = NULL;
+	ia32_exec_domain.pers_low = PER_LINUX32;
+	ia32_exec_domain.pers_high = PER_LINUX32;
+	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
+	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
+	register_exec_domain(&ia32_exec_domain);
+
+#if PAGE_SHIFT > IA32_PAGE_SHIFT
+	{
+		extern kmem_cache_t *partial_page_cachep;
+
+		partial_page_cachep = kmem_cache_create("partial_page_cache",
+							sizeof(struct partial_page), 0, 0,
+							NULL, NULL);
+		if (!partial_page_cachep)
+			panic("Cannot create partial page SLAB cache");
+	}
+#endif
+	return 0;
+}
+
+__initcall(ia32_init);
diff --git a/arch/ia64/ia32/ia32_traps.c b/arch/ia64/ia32/ia32_traps.c
new file mode 100644
index 0000000..e486042
--- /dev/null
+++ b/arch/ia64/ia32/ia32_traps.c
@@ -0,0 +1,156 @@
+/*
+ * IA-32 exception handlers
+ *
+ * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 2001-2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 06/16/00	A. Mallick	added siginfo for most cases (close to IA32)
+ * 09/29/00	D. Mosberger	added ia32_intercept()
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include "ia32priv.h"
+
+#include <asm/intrinsics.h>
+#include <asm/ptrace.h>
+
+int
+ia32_intercept (struct pt_regs *regs, unsigned long isr)
+{
+	switch ((isr >> 16) & 0xff) {
+	      case 0:	/* Instruction intercept fault */
+	      case 4:	/* Locked Data reference fault */
+	      case 1:	/* Gate intercept trap */
+		return -1;
+
+	      case 2:	/* System flag trap */
+		if (((isr >> 14) & 0x3) >= 2) {
+			/* MOV SS, POP SS instructions */
+			ia64_psr(regs)->id = 1;
+			return 0;
+		} else
+			return -1;
+	}
+	return -1;
+}
+
+int
+ia32_exception (struct pt_regs *regs, unsigned long isr)
+{
+	struct siginfo siginfo;
+
+	/* initialize these fields to avoid leaking kernel bits to user space: */
+	siginfo.si_errno = 0;
+	siginfo.si_flags = 0;
+	siginfo.si_isr = 0;
+	siginfo.si_imm = 0;
+	switch ((isr >> 16) & 0xff) {
+	      case 1:
+	      case 2:
+		siginfo.si_signo = SIGTRAP;
+		if (isr == 0)
+			siginfo.si_code = TRAP_TRACE;
+		else if (isr & 0x4)
+			siginfo.si_code = TRAP_BRANCH;
+		else
+			siginfo.si_code = TRAP_BRKPT;
+		break;
+
+	      case 3:
+		siginfo.si_signo = SIGTRAP;
+		siginfo.si_code = TRAP_BRKPT;
+		break;
+
+	      case 0:	/* Divide fault */
+		siginfo.si_signo = SIGFPE;
+		siginfo.si_code = FPE_INTDIV;
+		break;
+
+	      case 4:	/* Overflow */
+	      case 5:	/* Bounds fault */
+		siginfo.si_signo = SIGFPE;
+		siginfo.si_code = 0;
+		break;
+
+	      case 6:	/* Invalid Op-code */
+		siginfo.si_signo = SIGILL;
+		siginfo.si_code = ILL_ILLOPN;
+		break;
+
+	      case 7:	/* FP DNA */
+	      case 8:	/* Double Fault */
+	      case 9:	/* Invalid TSS */
+	      case 11:	/* Segment not present */
+	      case 12:	/* Stack fault */
+	      case 13:	/* General Protection Fault */
+		siginfo.si_signo = SIGSEGV;
+		siginfo.si_code = 0;
+		break;
+
+	      case 16:	/* Pending FP error */
+		{
+			unsigned long fsr, fcr;
+
+			fsr = ia64_getreg(_IA64_REG_AR_FSR);
+			fcr = ia64_getreg(_IA64_REG_AR_FCR);
+
+			siginfo.si_signo = SIGFPE;
+			/*
+			 * (~fcr & fsr) masks out exceptions that are not
+			 * unmasked (fcr/fsr hold the IA-32 FPU control and
+			 * status words here).  0x3f are the exception bits in
+			 * these regs, 0x200 is the C1 flag needed in case of a
+			 * stack fault, and 0x040 is the stack fault bit.  We
+			 * should only be taking one exception at a time, so if
+			 * this combination doesn't produce any single
+			 * exception, the program isn't synchronizing its FPU
+			 * usage and will suffer the consequences, since we
+			 * won't be able to fully reproduce the context of the
+			 * exception.
+			 */
+			siginfo.si_isr = isr;
+			siginfo.si_flags = __ISR_VALID;
+			switch(((~fcr) & (fsr & 0x3f)) | (fsr & 0x240)) {
+				case 0x000:
+				default:
+					siginfo.si_code = 0;
+					break;
+				case 0x001: /* Invalid Op */
+				case 0x040: /* Stack Fault */
+				case 0x240: /* Stack Fault | Direction */
+					siginfo.si_code = FPE_FLTINV;
+					break;
+				case 0x002: /* Denormalize */
+				case 0x010: /* Underflow */
+					siginfo.si_code = FPE_FLTUND;
+					break;
+				case 0x004: /* Zero Divide */
+					siginfo.si_code = FPE_FLTDIV;
+					break;
+				case 0x008: /* Overflow */
+					siginfo.si_code = FPE_FLTOVF;
+					break;
+				case 0x020: /* Precision */
+					siginfo.si_code = FPE_FLTRES;
+					break;
+			}
+
+			break;
+		}
+
+	      case 17:	/* Alignment check */
+		siginfo.si_signo = SIGSEGV;
+		siginfo.si_code = BUS_ADRALN;
+		break;
+
+	      case 19:	/* SSE Numeric error */
+		siginfo.si_signo = SIGFPE;
+		siginfo.si_code = 0;
+		break;
+
+	      default:
+		return -1;
+	}
+	force_sig_info(siginfo.si_signo, &siginfo, current);
+	return 0;
+}
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
new file mode 100644
index 0000000..b2de948
--- /dev/null
+++ b/arch/ia64/ia32/ia32priv.h
@@ -0,0 +1,544 @@
+#ifndef _ASM_IA64_IA32_PRIV_H
+#define _ASM_IA64_IA32_PRIV_H
+
+#include <linux/config.h>
+
+#include <asm/ia32.h>
+
+#ifdef CONFIG_IA32_SUPPORT
+
+#include <linux/binfmts.h>
+#include <linux/compat.h>
+#include <linux/rbtree.h>
+
+#include <asm/processor.h>
+
+/*
+ * 32 bit structures for IA32 support.
+ */
+
+#define IA32_PAGE_SIZE		(1UL << IA32_PAGE_SHIFT)
+#define IA32_PAGE_MASK		(~(IA32_PAGE_SIZE - 1))
+#define IA32_PAGE_ALIGN(addr)	(((addr) + IA32_PAGE_SIZE - 1) & IA32_PAGE_MASK)
+#define IA32_CLOCKS_PER_SEC	100	/* Cast in stone for IA32 Linux */
+
+/*
+ * Partially mapped pages provide precise accounting of which 4K sub-pages
+ * are mapped and which ones are not, thereby improving IA-32 compatibility.
+ */
+struct partial_page {
+	struct partial_page	*next; /* linked list, sorted by address */
+	struct rb_node		pp_rb;
+	/* 64K is the largest "normal" page supported by the ia64 ABI. So
+	 * 4K*32 should suffice. */
+	unsigned int		bitmap;
+	unsigned int		base;
+};
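+
+/*
+ * For instance, with a (hypothetical) 16K kernel PAGE_SIZE, each IA-64 page
+ * covers 16K/4K == 4 IA-32 sub-pages, so only bits 0..3 of the bitmap are
+ * used; a bitmap of 0x3 would mean the first two 4K sub-pages are mapped.
+ */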
+
+struct partial_page_list {
+	struct partial_page	*pp_head; /* list head, points to the lowest
+					   * addressed partial page */
+	struct rb_root		ppl_rb;
+	struct partial_page	*pp_hint; /* pp_hint->next is the last
+					   * accessed partial page */
+	atomic_t		pp_count; /* reference count */
+};
+
+#if PAGE_SHIFT > IA32_PAGE_SHIFT
+struct partial_page_list* ia32_init_pp_list (void);
+#else
+# define ia32_init_pp_list()	0
+#endif
+
+/* sigcontext.h */
+/*
+ * As documented in the iBCS2 standard..
+ *
+ * The first part of "struct _fpstate" is just the
+ * normal i387 hardware setup, the extra "status"
+ * word is used to save the coprocessor status word
+ * before entering the handler.
+ */
+struct _fpreg_ia32 {
+       unsigned short significand[4];
+       unsigned short exponent;
+};
+
+struct _fpxreg_ia32 {
+        unsigned short significand[4];
+        unsigned short exponent;
+        unsigned short padding[3];
+};
+
+struct _xmmreg_ia32 {
+        unsigned int element[4];
+};
+
+
+struct _fpstate_ia32 {
+       unsigned int    cw,
+		       sw,
+		       tag,
+		       ipoff,
+		       cssel,
+		       dataoff,
+		       datasel;
+       struct _fpreg_ia32      _st[8];
+       unsigned short  status;
+       unsigned short  magic;          /* 0xffff = regular FPU data only */
+
+       /* FXSR FPU environment */
+       unsigned int         _fxsr_env[6];   /* FXSR FPU env is ignored */
+       unsigned int         mxcsr;
+       unsigned int         reserved;
+       struct _fpxreg_ia32  _fxsr_st[8];    /* FXSR FPU reg data is ignored */
+       struct _xmmreg_ia32  _xmm[8];
+       unsigned int         padding[56];
+};
+
+struct sigcontext_ia32 {
+       unsigned short gs, __gsh;
+       unsigned short fs, __fsh;
+       unsigned short es, __esh;
+       unsigned short ds, __dsh;
+       unsigned int edi;
+       unsigned int esi;
+       unsigned int ebp;
+       unsigned int esp;
+       unsigned int ebx;
+       unsigned int edx;
+       unsigned int ecx;
+       unsigned int eax;
+       unsigned int trapno;
+       unsigned int err;
+       unsigned int eip;
+       unsigned short cs, __csh;
+       unsigned int eflags;
+       unsigned int esp_at_signal;
+       unsigned short ss, __ssh;
+       unsigned int fpstate;		/* really (struct _fpstate_ia32 *) */
+       unsigned int oldmask;
+       unsigned int cr2;
+};
+
+/* user.h */
+/*
+ * IA32 (Pentium III/4) FXSR, SSE support
+ *
+ * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
+ * interacting with the FXSR-format floating point environment.  Floating
+ * point data can be accessed in the regular format in the usual manner,
+ * and both the standard and SIMD floating point data can be accessed via
+ * the new ptrace requests.  In either case, changes to the FPU environment
+ * will be reflected in the task's state as expected.
+ */
+struct ia32_user_i387_struct {
+	int	cwd;
+	int	swd;
+	int	twd;
+	int	fip;
+	int	fcs;
+	int	foo;
+	int	fos;
+	/* 8*10 bytes for each FP-reg = 80 bytes */
+	struct _fpreg_ia32 	st_space[8];
+};
+
+struct ia32_user_fxsr_struct {
+	unsigned short	cwd;
+	unsigned short	swd;
+	unsigned short	twd;
+	unsigned short	fop;
+	int	fip;
+	int	fcs;
+	int	foo;
+	int	fos;
+	int	mxcsr;
+	int	reserved;
+	int	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
+	int	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
+	int	padding[56];
+};
+
+/* signal.h */
+#define IA32_SET_SA_HANDLER(ka,handler,restorer)				\
+				((ka)->sa.sa_handler = (__sighandler_t)		\
+					(((unsigned long)(restorer) << 32)	\
+					 | ((handler) & 0xffffffff)))
+#define IA32_SA_HANDLER(ka)	((unsigned long) (ka)->sa.sa_handler & 0xffffffff)
+#define IA32_SA_RESTORER(ka)	((unsigned long) (ka)->sa.sa_handler >> 32)
+
+#define __IA32_NR_sigreturn 119
+#define __IA32_NR_rt_sigreturn 173
+
+struct sigaction32 {
+       unsigned int sa_handler;		/* Really a pointer, but need to deal with 32 bits */
+       unsigned int sa_flags;
+       unsigned int sa_restorer;	/* Another 32 bit pointer */
+       compat_sigset_t sa_mask;		/* A 32 bit mask */
+};
+
+struct old_sigaction32 {
+       unsigned int  sa_handler;	/* Really a pointer, but need to deal
+					     with 32 bits */
+       compat_old_sigset_t sa_mask;		/* A 32 bit mask */
+       unsigned int sa_flags;
+       unsigned int sa_restorer;	/* Another 32 bit pointer */
+};
+
+typedef struct sigaltstack_ia32 {
+	unsigned int	ss_sp;
+	int		ss_flags;
+	unsigned int	ss_size;
+} stack_ia32_t;
+
+struct ucontext_ia32 {
+	unsigned int	  uc_flags;
+	unsigned int	  uc_link;
+	stack_ia32_t	  uc_stack;
+	struct sigcontext_ia32 uc_mcontext;
+	sigset_t	  uc_sigmask;	/* mask last for extensibility */
+};
+
+struct stat64 {
+	unsigned long long	st_dev;
+	unsigned char	__pad0[4];
+	unsigned int	__st_ino;
+	unsigned int	st_mode;
+	unsigned int	st_nlink;
+	unsigned int	st_uid;
+	unsigned int	st_gid;
+	unsigned long long	st_rdev;
+	unsigned char	__pad3[4];
+	unsigned int	st_size_lo;
+	unsigned int	st_size_hi;
+	unsigned int	st_blksize;
+	unsigned int	st_blocks;	/* Number of 512-byte blocks allocated. */
+	unsigned int	__pad4;		/* future possible st_blocks high bits */
+	unsigned int	st_atime;
+	unsigned int	st_atime_nsec;
+	unsigned int	st_mtime;
+	unsigned int	st_mtime_nsec;
+	unsigned int	st_ctime;
+	unsigned int	st_ctime_nsec;
+	unsigned int	st_ino_lo;
+	unsigned int	st_ino_hi;
+};
+
+typedef struct compat_siginfo {
+	int si_signo;
+	int si_errno;
+	int si_code;
+
+	union {
+		int _pad[((128/sizeof(int)) - 3)];
+
+		/* kill() */
+		struct {
+			unsigned int _pid;	/* sender's pid */
+			unsigned int _uid;	/* sender's uid */
+		} _kill;
+
+		/* POSIX.1b timers */
+		struct {
+			timer_t _tid;		/* timer id */
+			int _overrun;		/* overrun count */
+			char _pad[sizeof(unsigned int) - sizeof(int)];
+			compat_sigval_t _sigval;	/* same as below */
+			int _sys_private;       /* not to be passed to user */
+		} _timer;
+
+		/* POSIX.1b signals */
+		struct {
+			unsigned int _pid;	/* sender's pid */
+			unsigned int _uid;	/* sender's uid */
+			compat_sigval_t _sigval;
+		} _rt;
+
+		/* SIGCHLD */
+		struct {
+			unsigned int _pid;	/* which child */
+			unsigned int _uid;	/* sender's uid */
+			int _status;		/* exit code */
+			compat_clock_t _utime;
+			compat_clock_t _stime;
+		} _sigchld;
+
+		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+		struct {
+			unsigned int _addr;	/* faulting insn/memory ref. */
+		} _sigfault;
+
+		/* SIGPOLL */
+		struct {
+			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
+			int _fd;
+		} _sigpoll;
+	} _sifields;
+} compat_siginfo_t;
+
+struct old_linux32_dirent {
+	u32	d_ino;
+	u32	d_offset;
+	u16	d_namlen;
+	char	d_name[1];
+};
+
+/*
+ * IA-32 ELF specific definitions for IA-64.
+ */
+
+#define _ASM_IA64_ELF_H		/* Don't include elf.h */
+
+#include <linux/sched.h>
+#include <asm/processor.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_386)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS	ELFCLASS32
+#define ELF_DATA	ELFDATA2LSB
+#define ELF_ARCH	EM_386
+
+#define IA32_PAGE_OFFSET	0xc0000000
+#define IA32_STACK_TOP		IA32_PAGE_OFFSET
+#define IA32_GATE_OFFSET	IA32_PAGE_OFFSET
+#define IA32_GATE_END		(IA32_PAGE_OFFSET + PAGE_SIZE)
+
+/*
+ * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can
+ * access them.
+ */
+#define IA32_GDT_OFFSET		(IA32_PAGE_OFFSET + PAGE_SIZE)
+#define IA32_TSS_OFFSET		(IA32_PAGE_OFFSET + 2*PAGE_SIZE)
+#define IA32_LDT_OFFSET		(IA32_PAGE_OFFSET + 3*PAGE_SIZE)
+
+#define ELF_EXEC_PAGESIZE	IA32_PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed.
+ * Typical use of this is to invoke "./ld.so someprog" to test out a
+ * new version of the loader.  We need to make sure that it is out of
+ * the way of the program that it will "exec", and that there is
+ * sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE		(IA32_PAGE_OFFSET/3 + 0x1000000)
+
+void ia64_elf32_init(struct pt_regs *regs);
+#define ELF_PLAT_INIT(_r, load_addr)	ia64_elf32_init(_r)
+
+#define elf_addr_t	u32
+
+/* This macro yields a bitmask that programs can use to figure out
+   what instruction set this CPU supports.  */
+#define ELF_HWCAP	0
+
+/* This macro yields a string that ld.so will use to load
+   implementation specific libraries for optimization.  Not terribly
+   relevant until we have real hardware to play with... */
+#define ELF_PLATFORM	NULL
+
+#ifdef __KERNEL__
+# define SET_PERSONALITY(EX,IBCS2)				\
+	(current->personality = (IBCS2) ? PER_SVR4 : PER_LINUX)
+#endif
+
+#define IA32_EFLAG	0x200
+
+/*
+ * IA-32 ELF specific definitions for IA-64.
+ */
+
+#define __USER_CS      0x23
+#define __USER_DS      0x2B
+
+/*
+ * The per-cpu GDT has 32 entries: see <asm-i386/segment.h>
+ */
+#define GDT_ENTRIES 32
+
+#define GDT_SIZE	(GDT_ENTRIES * 8)
+
+#define TSS_ENTRY 14
+#define LDT_ENTRY	(TSS_ENTRY + 1)
+
+#define IA32_SEGSEL_RPL		(0x3 << 0)
+#define IA32_SEGSEL_TI		(0x1 << 2)
+#define IA32_SEGSEL_INDEX_SHIFT	3
+
+#define _TSS			((unsigned long) TSS_ENTRY << IA32_SEGSEL_INDEX_SHIFT)
+#define _LDT			((unsigned long) LDT_ENTRY << IA32_SEGSEL_INDEX_SHIFT)
+
+#define IA32_SEG_BASE		16
+#define IA32_SEG_TYPE		40
+#define IA32_SEG_SYS		44
+#define IA32_SEG_DPL		45
+#define IA32_SEG_P		47
+#define IA32_SEG_HIGH_LIMIT	48
+#define IA32_SEG_AVL		52
+#define IA32_SEG_DB		54
+#define IA32_SEG_G		55
+#define IA32_SEG_HIGH_BASE	56
+
+#define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, gran)	\
+	       (((limit) & 0xffff)								\
+		| (((unsigned long) (base) & 0xffffff) << IA32_SEG_BASE)			\
+		| ((unsigned long) (segtype) << IA32_SEG_TYPE)					\
+		| ((unsigned long) (nonsysseg) << IA32_SEG_SYS)					\
+		| ((unsigned long) (dpl) << IA32_SEG_DPL)					\
+		| ((unsigned long) (segpresent) << IA32_SEG_P)					\
+		| ((((unsigned long) (limit) >> 16) & 0xf) << IA32_SEG_HIGH_LIMIT)		\
+		| ((unsigned long) (avl) << IA32_SEG_AVL)					\
+		| ((unsigned long) (segdb) << IA32_SEG_DB)					\
+		| ((unsigned long) (gran) << IA32_SEG_G)					\
+		| ((((unsigned long) (base) >> 24) & 0xff) << IA32_SEG_HIGH_BASE))
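+
+/*
+ * For example, the __USER_DS descriptor built in ia32_boot_gdt_init(),
+ *
+ *	IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
+ *			    0x3, 1, 3, 1, 1, 1, 1)
+ *
+ * places the 20-bit limit in bits 15:0 and 51:48, type 0x3 (read/write
+ * data) at IA32_SEG_TYPE, and sets the S, DPL=3, P, AVL, D/B and G fields
+ * at the bit positions named above.
+ */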
+
+#define SEG_LIM		32
+#define SEG_TYPE	52
+#define SEG_SYS		56
+#define SEG_DPL		57
+#define SEG_P		59
+#define SEG_AVL		60
+#define SEG_DB		62
+#define SEG_G		63
+
+/* Unscramble an IA-32 segment descriptor into the IA-64 format.  */
+#define IA32_SEG_UNSCRAMBLE(sd)									 \
+	(   (((sd) >> IA32_SEG_BASE) & 0xffffff) | ((((sd) >> IA32_SEG_HIGH_BASE) & 0xff) << 24) \
+	 | ((((sd) & 0xffff) | ((((sd) >> IA32_SEG_HIGH_LIMIT) & 0xf) << 16)) << SEG_LIM)	 \
+	 | ((((sd) >> IA32_SEG_TYPE) & 0xf) << SEG_TYPE)					 \
+	 | ((((sd) >> IA32_SEG_SYS) & 0x1) << SEG_SYS)						 \
+	 | ((((sd) >> IA32_SEG_DPL) & 0x3) << SEG_DPL)						 \
+	 | ((((sd) >> IA32_SEG_P) & 0x1) << SEG_P)						 \
+	 | ((((sd) >> IA32_SEG_AVL) & 0x1) << SEG_AVL)						 \
+	 | ((((sd) >> IA32_SEG_DB) & 0x1) << SEG_DB)						 \
+	 | ((((sd) >> IA32_SEG_G) & 0x1) << SEG_G))
+
+#define IA32_IOBASE	0x2000000000000000UL /* Virtual address for I/O space */
+
+#define IA32_CR0	0x80000001	/* Enable PG and PE bits */
+#define IA32_CR4	0x600		/* MMXEX and FXSR on */
+
+/*
+ *  IA32 floating point control registers starting values
+ */
+
+#define IA32_FSR_DEFAULT	0x55550000		/* set all tag bits */
+#define IA32_FCR_DEFAULT	0x17800000037fUL	/* extended precision, all masks */
+
+#define IA32_PTRACE_GETREGS	12
+#define IA32_PTRACE_SETREGS	13
+#define IA32_PTRACE_GETFPREGS	14
+#define IA32_PTRACE_SETFPREGS	15
+#define IA32_PTRACE_GETFPXREGS	18
+#define IA32_PTRACE_SETFPXREGS	19
+
+#define ia32_start_thread(regs,new_ip,new_sp) do {				\
+	set_fs(USER_DS);							\
+	ia64_psr(regs)->cpl = 3;	/* set user mode */			\
+	ia64_psr(regs)->ri = 0;		/* clear return slot number */		\
+	ia64_psr(regs)->is = 1;		/* IA-32 instruction set */		\
+	regs->cr_iip = new_ip;							\
+	regs->ar_rsc = 0xc;		/* enforced lazy mode, priv. level 3 */	\
+	regs->ar_rnat = 0;							\
+	regs->loadrs = 0;							\
+	regs->r12 = new_sp;							\
+} while (0)
+
+/*
+ * Local Descriptor Table (LDT) related declarations.
+ */
+
+#define IA32_LDT_ENTRIES	8192		/* Maximum number of LDT entries supported. */
+#define IA32_LDT_ENTRY_SIZE	8		/* The size of each LDT entry. */
+
+#define LDT_entry_a(info) \
+	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+
+#define LDT_entry_b(info)				\
+	(((info)->base_addr & 0xff000000) |		\
+	(((info)->base_addr & 0x00ff0000) >> 16) |	\
+	((info)->limit & 0xf0000) |			\
+	(((info)->read_exec_only ^ 1) << 9) |		\
+	((info)->contents << 10) |			\
+	(((info)->seg_not_present ^ 1) << 15) |		\
+	((info)->seg_32bit << 22) |			\
+	((info)->limit_in_pages << 23) |		\
+	((info)->useable << 20) |			\
+	0x7100)
+
+#define LDT_empty(info) (			\
+	(info)->base_addr	== 0	&&	\
+	(info)->limit		== 0	&&	\
+	(info)->contents	== 0	&&	\
+	(info)->read_exec_only	== 1	&&	\
+	(info)->seg_32bit	== 0	&&	\
+	(info)->limit_in_pages	== 0	&&	\
+	(info)->seg_not_present	== 1	&&	\
+	(info)->useable		== 0	)
+
+static inline void
+load_TLS (struct thread_struct *t, unsigned int cpu)
+{
+	extern unsigned long *cpu_gdt_table[NR_CPUS];
+
+	memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0], sizeof(long));
+	memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1], sizeof(long));
+	memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2], sizeof(long));
+}
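+
+/*
+ * Each memcpy above moves one 8-byte descriptor (sizeof(long) on ia64),
+ * refreshing the three TLS slots of the per-CPU GDT from the thread's
+ * tls_array.
+ */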
+
+struct ia32_user_desc {
+	unsigned int entry_number;
+	unsigned int base_addr;
+	unsigned int limit;
+	unsigned int seg_32bit:1;
+	unsigned int contents:2;
+	unsigned int read_exec_only:1;
+	unsigned int limit_in_pages:1;
+	unsigned int seg_not_present:1;
+	unsigned int useable:1;
+};
+
+struct linux_binprm;
+
+extern void ia32_init_addr_space (struct pt_regs *regs);
+extern int ia32_setup_arg_pages (struct linux_binprm *bprm, int exec_stack);
+extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t);
+extern void ia32_load_segment_descriptors (struct task_struct *task);
+
+#define ia32f2ia64f(dst,src)			\
+do {						\
+	ia64_ldfe(6,src);			\
+	ia64_stop();				\
+	ia64_stf_spill(dst, 6);			\
+} while(0)
+
+#define ia64f2ia32f(dst,src)			\
+do {						\
+	ia64_ldf_fill(6, src);			\
+	ia64_stop();				\
+	ia64_stfe(dst, 6);			\
+} while(0)
+
+struct user_regs_struct32 {
+	__u32 ebx, ecx, edx, esi, edi, ebp, eax;
+	unsigned short ds, __ds, es, __es;
+	unsigned short fs, __fs, gs, __gs;
+	__u32 orig_eax, eip;
+	unsigned short cs, __cs;
+	__u32 eflags, esp;
+	unsigned short ss, __ss;
+};
+
+/* Prototypes for use in elfcore32.h */
+extern int save_ia32_fpstate (struct task_struct *, struct ia32_user_i387_struct __user *);
+extern int save_ia32_fpxstate (struct task_struct *, struct ia32_user_fxsr_struct __user *);
+
+#endif /* CONFIG_IA32_SUPPORT */
+
+#endif /* _ASM_IA64_IA32_PRIV_H */
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
new file mode 100644
index 0000000..247a21c
--- /dev/null
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -0,0 +1,2747 @@
+/*
+ * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
+ *
+ * Copyright (C) 2000		VA Linux Co
+ * Copyright (C) 2000		Don Dugger <n0ano@valinux.com>
+ * Copyright (C) 1999		Arun Sharma <arun.sharma@intel.com>
+ * Copyright (C) 1997,1998	Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997		David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2004		Gordon Jin <gordon.jin@intel.com>
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * environments.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/sysctl.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/signal.h>
+#include <linux/resource.h>
+#include <linux/times.h>
+#include <linux/utsname.h>
+#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/mm.h>
+#include <linux/shm.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/nfs_fs.h>
+#include <linux/quota.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/cache.h>
+#include <linux/nfsd/xdr.h>
+#include <linux/nfsd/syscall.h>
+#include <linux/poll.h>
+#include <linux/eventpoll.h>
+#include <linux/personality.h>
+#include <linux/ptrace.h>
+#include <linux/stat.h>
+#include <linux/ipc.h>
+#include <linux/compat.h>
+#include <linux/vfs.h>
+#include <linux/mman.h>
+
+#include <asm/intrinsics.h>
+#include <asm/semaphore.h>
+#include <asm/types.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+
+#include "ia32priv.h"
+
+#include <net/scm.h>
+#include <net/sock.h>
+
+#define DEBUG	0
+
+#if DEBUG
+# define DBG(fmt...)	printk(KERN_DEBUG fmt)
+#else
+# define DBG(fmt...)
+#endif
+
+#define ROUND_UP(x,a)	((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
+
+#define OFFSET4K(a)		((a) & 0xfff)
+#define PAGE_START(addr)	((addr) & PAGE_MASK)
+#define MINSIGSTKSZ_IA32	2048
+
+#define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
+#define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))
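+
+/*
+ * E.g. (hypothetical values): ROUND_UP(0x1001, 0x1000) == 0x2000,
+ * OFFSET4K(0x12345) == 0x345, and high2lowuid(100000) == 65534, which
+ * squeezes a 32-bit uid into the 16-bit field of the old IA-32 ABI.
+ */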
+
+/*
+ * Anything that modifies or inspects ia32 user virtual memory must hold this semaphore
+ * while doing so.
+ */
+/* XXX make per-mm: */
+static DECLARE_MUTEX(ia32_mmap_sem);
+
+asmlinkage long
+sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
+	      struct pt_regs *regs)
+{
+	long error;
+	char *filename;
+	unsigned long old_map_base, old_task_size, tssd;
+
+	filename = getname(name);
+	error = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		return error;
+
+	old_map_base  = current->thread.map_base;
+	old_task_size = current->thread.task_size;
+	tssd = ia64_get_kr(IA64_KR_TSSD);
+
+	/* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
+	current->thread.map_base  = DEFAULT_MAP_BASE;
+	current->thread.task_size = DEFAULT_TASK_SIZE;
+	ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
+	ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
+
+	error = compat_do_execve(filename, argv, envp, regs);
+	putname(filename);
+
+	if (error < 0) {
+		/* oops, execve failed, switch back to old values... */
+		ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
+		ia64_set_kr(IA64_KR_TSSD, tssd);
+		current->thread.map_base  = old_map_base;
+		current->thread.task_size = old_task_size;
+	}
+
+	return error;
+}
+
+int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
+{
+	int err;
+
+	if ((u64) stat->size > MAX_NON_LFS ||
+	    !old_valid_dev(stat->dev) ||
+	    !old_valid_dev(stat->rdev))
+		return -EOVERFLOW;
+
+	if (clear_user(ubuf, sizeof(*ubuf)))
+		return -EFAULT;
+
+	err  = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev);
+	err |= __put_user(stat->ino, &ubuf->st_ino);
+	err |= __put_user(stat->mode, &ubuf->st_mode);
+	err |= __put_user(stat->nlink, &ubuf->st_nlink);
+	err |= __put_user(high2lowuid(stat->uid), &ubuf->st_uid);
+	err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid);
+	err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev);
+	err |= __put_user(stat->size, &ubuf->st_size);
+	err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime);
+	err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec);
+	err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime);
+	err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec);
+	err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime);
+	err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec);
+	err |= __put_user(stat->blksize, &ubuf->st_blksize);
+	err |= __put_user(stat->blocks, &ubuf->st_blocks);
+	return err;
+}
+
+#if PAGE_SHIFT > IA32_PAGE_SHIFT
+
+
+static int
+get_page_prot (struct vm_area_struct *vma, unsigned long addr)
+{
+	int prot = 0;
+
+	if (!vma || vma->vm_start > addr)
+		return 0;
+
+	if (vma->vm_flags & VM_READ)
+		prot |= PROT_READ;
+	if (vma->vm_flags & VM_WRITE)
+		prot |= PROT_WRITE;
+	if (vma->vm_flags & VM_EXEC)
+		prot |= PROT_EXEC;
+	return prot;
+}
+
+/*
+ * Map a subpage by creating an anonymous page that contains the union of the old page and
+ * the subpage.
+ */
+static unsigned long
+mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
+	      loff_t off)
+{
+	void *page = NULL;
+	struct inode *inode;
+	unsigned long ret = 0;
+	struct vm_area_struct *vma = find_vma(current->mm, start);
+	int old_prot = get_page_prot(vma, start);
+
+	DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
+	    file, start, end, prot, flags, off);
+
+
+	/* Optimize the case where the old mmap and the new mmap are both anonymous */
+	if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
+		if (clear_user((void __user *) start, end - start)) {
+			ret = -EFAULT;
+			goto out;
+		}
+		goto skip_mmap;
+	}
+
+	page = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+
+	if (old_prot)
+		copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE);
+
+	down_write(&current->mm->mmap_sem);
+	{
+		ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
+			      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
+	}
+	up_write(&current->mm->mmap_sem);
+
+	if (IS_ERR((void *) ret))
+		goto out;
+
+	if (old_prot) {
+		/* copy back the old page contents.  */
+		if (offset_in_page(start))
+			copy_to_user((void __user *) PAGE_START(start), page,
+				     offset_in_page(start));
+		if (offset_in_page(end))
+			copy_to_user((void __user *) end, page + offset_in_page(end),
+				     PAGE_SIZE - offset_in_page(end));
+	}
+
+	if (!(flags & MAP_ANONYMOUS)) {
+		/* read the file contents */
+		inode = file->f_dentry->d_inode;
+		if (!inode->i_fop || !file->f_op->read
+		    || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0))
+		{
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+ skip_mmap:
+	if (!(prot & PROT_WRITE))
+		ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
+  out:
+	if (page)
+		free_page((unsigned long) page);
+	return ret;
+}
+
+/* SLAB cache for partial_page structures */
+kmem_cache_t *partial_page_cachep;
+
+/*
+ * Initialize a partial_page_list.
+ * Returns NULL if the kmalloc fails.
+ */
+struct partial_page_list*
+ia32_init_pp_list(void)
+{
+	struct partial_page_list *p;
+
+	if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
+		return p;
+	p->pp_head = NULL;
+	p->ppl_rb = RB_ROOT;
+	p->pp_hint = NULL;
+	atomic_set(&p->pp_count, 1);
+	return p;
+}
+
+/*
+ * Search partial page list @ppl for the partial page starting at @start.
+ * If it is found, return it.
+ * Otherwise, return NULL and set @pprev, @rb_link and @rb_parent for use
+ * by a later __ia32_insert_pp().
+ */
+static struct partial_page *
+__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
+	struct partial_page **pprev, struct rb_node ***rb_link,
+	struct rb_node **rb_parent)
+{
+	struct partial_page *pp;
+	struct rb_node **__rb_link, *__rb_parent, *rb_prev;
+
+	pp = ppl->pp_hint;
+	if (pp && pp->base == start)
+		return pp;
+
+	__rb_link = &ppl->ppl_rb.rb_node;
+	rb_prev = __rb_parent = NULL;
+
+	while (*__rb_link) {
+		__rb_parent = *__rb_link;
+		pp = rb_entry(__rb_parent, struct partial_page, pp_rb);
+
+		if (pp->base == start) {
+			ppl->pp_hint = pp;
+			return pp;
+		} else if (pp->base < start) {
+			rb_prev = __rb_parent;
+			__rb_link = &__rb_parent->rb_right;
+		} else {
+			__rb_link = &__rb_parent->rb_left;
+		}
+	}
+
+	*rb_link = __rb_link;
+	*rb_parent = __rb_parent;
+	*pprev = NULL;
+	if (rb_prev)
+		*pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
+	return NULL;
+}
+
+/*
+ * insert @pp into @ppl.
+ */
+static void
+__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
+	 struct partial_page *prev, struct rb_node **rb_link,
+	struct rb_node *rb_parent)
+{
+	/* link list */
+	if (prev) {
+		pp->next = prev->next;
+		prev->next = pp;
+	} else {
+		ppl->pp_head = pp;
+		if (rb_parent)
+			pp->next = rb_entry(rb_parent,
+				struct partial_page, pp_rb);
+		else
+			pp->next = NULL;
+	}
+
+	/* link rb */
+	rb_link_node(&pp->pp_rb, rb_parent, rb_link);
+	rb_insert_color(&pp->pp_rb, &ppl->ppl_rb);
+
+	ppl->pp_hint = pp;
+}
+
+/*
+ * delete @pp from partial page list @ppl.
+ */
+static void
+__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
+	struct partial_page *prev)
+{
+	if (prev) {
+		prev->next = pp->next;
+		if (ppl->pp_hint == pp)
+			ppl->pp_hint = prev;
+	} else {
+		ppl->pp_head = pp->next;
+		if (ppl->pp_hint == pp)
+			ppl->pp_hint = pp->next;
+	}
+	rb_erase(&pp->pp_rb, &ppl->ppl_rb);
+	kmem_cache_free(partial_page_cachep, pp);
+}
+
+static struct partial_page *
+__pp_prev(struct partial_page *pp)
+{
+	struct rb_node *prev = rb_prev(&pp->pp_rb);
+	if (prev)
+		return rb_entry(prev, struct partial_page, pp_rb);
+	else
+		return NULL;
+}
+
+/*
+ * Delete partial pages with address between @start and @end.
+ * @start and @end are page aligned.
+ */
+static void
+__ia32_delete_pp_range(unsigned int start, unsigned int end)
+{
+	struct partial_page *pp, *prev;
+	struct rb_node **rb_link, *rb_parent;
+
+	if (start >= end)
+		return;
+
+	pp = __ia32_find_pp(current->thread.ppl, start, &prev,
+					&rb_link, &rb_parent);
+	if (pp)
+		prev = __pp_prev(pp);
+	else {
+		if (prev)
+			pp = prev->next;
+		else
+			pp = current->thread.ppl->pp_head;
+	}
+
+	while (pp && pp->base < end) {
+		struct partial_page *tmp = pp->next;
+		__ia32_delete_pp(current->thread.ppl, pp, prev);
+		pp = tmp;
+	}
+}
+
+/*
+ * Set the range between @start and @end in bitmap.
+ * @start and @end should be IA32 page aligned and in the same IA64 page.
+ */
+static int
+__ia32_set_pp(unsigned int start, unsigned int end, int flags)
+{
+	struct partial_page *pp, *prev;
+	struct rb_node ** rb_link, *rb_parent;
+	unsigned int pstart, start_bit, end_bit, i;
+
+	pstart = PAGE_START(start);
+	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
+	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
+	if (end_bit == 0)
+		end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
+	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
+					&rb_link, &rb_parent);
+	if (pp) {
+		for (i = start_bit; i < end_bit; i++)
+			set_bit(i, &pp->bitmap);
+		/*
+		 * Check: if this partial page has been set to a full page,
+		 * then delete it.
+		 */
+		if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >=
+				PAGE_SIZE/IA32_PAGE_SIZE) {
+			__ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
+		}
+		return 0;
+	}
+
+	/*
+	 * MAP_FIXED may lead to overlapping mmap.
+	 * In this case, the requested mmap area may already be mmapped as a
+	 * full page. So check the vma before adding a new partial page.
+	 */
+	if (flags & MAP_FIXED) {
+		struct vm_area_struct *vma = find_vma(current->mm, pstart);
+		if (vma && vma->vm_start <= pstart)
+			return 0;
+	}
+
+	/* allocate a new partial_page */
+	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+	pp->base = pstart;
+	pp->bitmap = 0;
+	for (i=start_bit; i<end_bit; i++)
+		set_bit(i, &(pp->bitmap));
+	pp->next = NULL;
+	__ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
+	return 0;
+}
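+
+/*
+ * Worked example (hypothetical, 16K PAGE_SIZE): __ia32_set_pp(0x5000,
+ * 0x7000, 0) gives pstart == 0x4000, start_bit == (0x5000 % 0x4000)/0x1000
+ * == 1 and end_bit == 3, so bits 1 and 2 are set, marking the second and
+ * third 4K sub-pages of that IA-64 page as mapped.
+ */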
+
+/*
+ * @start and @end should be IA32 page aligned, but don't need to be in the
+ * same IA64 page. Split the range so that each piece lies within a single
+ * IA64 page, then call __ia32_set_pp() on each piece.
+ */
+static void
+ia32_set_pp(unsigned int start, unsigned int end, int flags)
+{
+	down_write(&current->mm->mmap_sem);
+	if (flags & MAP_FIXED) {
+		/*
+		 * MAP_FIXED may lead to overlapping mmap. When this happens,
+		 * any run of complete IA64 pages in the range causes the old
+		 * partial pages there to be deleted.
+		 */
+		__ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
+	}
+
+	if (end < PAGE_ALIGN(start)) {
+		__ia32_set_pp(start, end, flags);
+	} else {
+		if (offset_in_page(start))
+			__ia32_set_pp(start, PAGE_ALIGN(start), flags);
+		if (offset_in_page(end))
+			__ia32_set_pp(PAGE_START(end), end, flags);
+	}
+	up_write(&current->mm->mmap_sem);
+}
+
+/*
+ * Clear the range between @start and @end in the bitmap.
+ * @start and @end should be IA32 page aligned and in the same IA64 page.
+ * If the bitmap becomes 0 afterwards, free the partial page and return 1;
+ * otherwise return 0.
+ * If the partial page is not found in the list, then:
+ *	if the vma exists, turn the full page into a partial page;
+ *	else return -ENOMEM.
+ */
+static int
+__ia32_unset_pp(unsigned int start, unsigned int end)
+{
+	struct partial_page *pp, *prev;
+	struct rb_node ** rb_link, *rb_parent;
+	unsigned int pstart, start_bit, end_bit, i;
+	struct vm_area_struct *vma;
+
+	pstart = PAGE_START(start);
+	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
+	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
+	if (end_bit == 0)
+		end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
+
+	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
+					&rb_link, &rb_parent);
+	if (pp) {
+		for (i = start_bit; i < end_bit; i++)
+			clear_bit(i, &pp->bitmap);
+		if (pp->bitmap == 0) {
+			__ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
+			return 1;
+		}
+		return 0;
+	}
+
+	vma = find_vma(current->mm, pstart);
+	if (!vma || vma->vm_start > pstart) {
+		return -ENOMEM;
+	}
+
+	/* allocate a new partial_page */
+	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+	pp->base = pstart;
+	pp->bitmap = 0;
+	for (i = 0; i < start_bit; i++)
+		set_bit(i, &(pp->bitmap));
+	for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++)
+		set_bit(i, &(pp->bitmap));
+	pp->next = NULL;
+	__ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
+	return 0;
+}
+
+/*
+ * Delete the partial pages between PAGE_ALIGN(start) and PAGE_START(end)
+ * by calling __ia32_delete_pp_range().  Clear possible partial pages at the
+ * ends by calling __ia32_unset_pp().
+ * See __ia32_unset_pp() for the return value.
+ */
+static int
+ia32_unset_pp(unsigned int *startp, unsigned int *endp)
+{
+	unsigned int start = *startp, end = *endp;
+	int ret = 0;
+
+	down_write(&current->mm->mmap_sem);
+
+	__ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
+
+	if (end < PAGE_ALIGN(start)) {
+		ret = __ia32_unset_pp(start, end);
+		if (ret == 1) {
+			*startp = PAGE_START(start);
+			*endp = PAGE_ALIGN(end);
+		}
+		if (ret == 0) {
+			/* to shortcut sys_munmap() in sys32_munmap() */
+			*startp = PAGE_START(start);
+			*endp = PAGE_START(end);
+		}
+	} else {
+		if (offset_in_page(start)) {
+			ret = __ia32_unset_pp(start, PAGE_ALIGN(start));
+			if (ret == 1)
+				*startp = PAGE_START(start);
+			if (ret == 0)
+				*startp = PAGE_ALIGN(start);
+			if (ret < 0)
+				goto out;
+		}
+		if (offset_in_page(end)) {
+			ret = __ia32_unset_pp(PAGE_START(end), end);
+			if (ret == 1)
+				*endp = PAGE_ALIGN(end);
+			if (ret == 0)
+				*endp = PAGE_START(end);
+		}
+	}
+
+ out:
+	up_write(&current->mm->mmap_sem);
+	return ret;
+}
+
+/*
+ * Compare the range between @start and @end with the bitmap in the partial page.
+ * @start and @end should be IA32 page aligned and in the same IA64 page.
+ */
+static int
+__ia32_compare_pp(unsigned int start, unsigned int end)
+{
+	struct partial_page *pp, *prev;
+	struct rb_node ** rb_link, *rb_parent;
+	unsigned int pstart, start_bit, end_bit, size;
+	unsigned int first_bit, next_zero_bit;	/* the first range in bitmap */
+
+	pstart = PAGE_START(start);
+
+	pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
+					&rb_link, &rb_parent);
+	if (!pp)
+		return 1;
+
+	start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
+	end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
+	size = sizeof(pp->bitmap) * 8;
+	first_bit = find_first_bit(&pp->bitmap, size);
+	next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit);
+	if ((start_bit < first_bit) || (end_bit > next_zero_bit)) {
+		/* exceeds the first range in bitmap */
+		return -ENOMEM;
+	} else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) {
+		first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit);
+		if ((next_zero_bit < first_bit) && (first_bit < size))
+			return 1;	/* has next range */
+		else
+			return 0; 	/* no next range */
+	} else
+		return 1;
+}
+
+/*
+ * @start and @end should be IA32 page aligned, but don't need to be in the
+ * same IA64 page. Split the range so that each piece lies within a single
+ * IA64 page, then call __ia32_compare_pp() on each piece.
+ *
+ * Take this as example: the range is the 1st and 2nd 4K page.
+ * Return 0 if they fit bitmap exactly, i.e. bitmap = 00000011;
+ * Return 1 if the range doesn't cover whole bitmap, e.g. bitmap = 00001111;
+ * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or
+ * 	bitmap = 00000101.
+ */
+static int
+ia32_compare_pp(unsigned int *startp, unsigned int *endp)
+{
+	unsigned int start = *startp, end = *endp;
+	int retval = 0;
+
+	down_write(&current->mm->mmap_sem);
+
+	if (end < PAGE_ALIGN(start)) {
+		retval = __ia32_compare_pp(start, end);
+		if (retval == 0) {
+			*startp = PAGE_START(start);
+			*endp = PAGE_ALIGN(end);
+		}
+	} else {
+		if (offset_in_page(start)) {
+			retval = __ia32_compare_pp(start,
+						   PAGE_ALIGN(start));
+			if (retval == 0)
+				*startp = PAGE_START(start);
+			if (retval < 0)
+				goto out;
+		}
+		if (offset_in_page(end)) {
+			retval = __ia32_compare_pp(PAGE_START(end), end);
+			if (retval == 0)
+				*endp = PAGE_ALIGN(end);
+		}
+	}
+
+ out:
+	up_write(&current->mm->mmap_sem);
+	return retval;
+}
+
+static void
+__ia32_drop_pp_list(struct partial_page_list *ppl)
+{
+	struct partial_page *pp = ppl->pp_head;
+
+	while (pp) {
+		struct partial_page *next = pp->next;
+		kmem_cache_free(partial_page_cachep, pp);
+		pp = next;
+	}
+
+	kfree(ppl);
+}
+
+void
+ia32_drop_partial_page_list(struct task_struct *task)
+{
+	struct partial_page_list* ppl = task->thread.ppl;
+
+	if (ppl && atomic_dec_and_test(&ppl->pp_count))
+		__ia32_drop_pp_list(ppl);
+}
+
+/*
+ * Copy current->thread.ppl to ppl (already initialized).
+ */
+static int
+__ia32_copy_pp_list(struct partial_page_list *ppl)
+{
+	struct partial_page *pp, *tmp, *prev;
+	struct rb_node **rb_link, *rb_parent;
+
+	ppl->pp_head = NULL;
+	ppl->pp_hint = NULL;
+	ppl->ppl_rb = RB_ROOT;
+	rb_link = &ppl->ppl_rb.rb_node;
+	rb_parent = NULL;
+	prev = NULL;
+
+	for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
+		tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+		if (!tmp)
+			return -ENOMEM;
+		*tmp = *pp;
+		__ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent);
+		prev = tmp;
+		rb_link = &tmp->pp_rb.rb_right;
+		rb_parent = &tmp->pp_rb;
+	}
+	return 0;
+}
+
+int
+ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
+{
+	int retval = 0;
+
+	if (clone_flags & CLONE_VM) {
+		atomic_inc(&current->thread.ppl->pp_count);
+		p->thread.ppl = current->thread.ppl;
+	} else {
+		p->thread.ppl = ia32_init_pp_list();
+		if (!p->thread.ppl)
+			return -ENOMEM;
+		down_write(&current->mm->mmap_sem);
+		{
+			retval = __ia32_copy_pp_list(p->thread.ppl);
+		}
+		up_write(&current->mm->mmap_sem);
+	}
+
+	return retval;
+}
+
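+/*
+ * Do the real work of an IA-32 mmap() when IA64 pages are bigger than IA-32
+ * pages.  Partial leading/trailing IA64 pages are handled via mmap_subpage().
+ * If the file offset is not congruent with the mapping address modulo
+ * PAGE_SIZE, the contents can't simply be mapped, so we fall back to an
+ * anonymous mapping and read the file contents into it by hand.
+ */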
+static unsigned long
+emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
+	      loff_t off)
+{
+	unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
+	struct inode *inode;
+	loff_t poff;
+
+	end = start + len;
+	pstart = PAGE_START(start);
+	pend = PAGE_ALIGN(end);
+
+	if (flags & MAP_FIXED) {
+		ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
+		if (start > pstart) {
+			if (flags & MAP_SHARED)
+				printk(KERN_INFO
+				       "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
+				       current->comm, current->pid, start);
+			ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
+					   off);
+			if (IS_ERR((void *) ret))
+				return ret;
+			pstart += PAGE_SIZE;
+			if (pstart >= pend)
+				goto out;	/* done */
+		}
+		if (end < pend) {
+			if (flags & MAP_SHARED)
+				printk(KERN_INFO
+				       "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
+				       current->comm, current->pid, end);
+			ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
+					   (off + len) - offset_in_page(end));
+			if (IS_ERR((void *) ret))
+				return ret;
+			pend -= PAGE_SIZE;
+			if (pstart >= pend)
+				goto out;	/* done */
+		}
+	} else {
+		/*
+		 * If a start address was specified, use it if the entire rounded out area
+		 * is available.
+		 */
+		if (start && !pstart)
+			fudge = 1;	/* handle case of mapping to range (0,PAGE_SIZE) */
+		tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
+		if (tmp != pstart) {
+			pstart = tmp;
+			start = pstart + offset_in_page(off);	/* make start congruent with off */
+			end = start + len;
+			pend = PAGE_ALIGN(end);
+		}
+	}
+
+	poff = off + (pstart - start);	/* note: (pstart - start) may be negative */
+	is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0);
+
+	if ((flags & MAP_SHARED) && !is_congruent)
+		printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
+		       "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off);
+
+	DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
+	    is_congruent ? "congruent" : "not congruent", poff);
+
+	down_write(&current->mm->mmap_sem);
+	{
+		if (!(flags & MAP_ANONYMOUS) && is_congruent)
+			ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
+		else
+			ret = do_mmap(NULL, pstart, pend - pstart,
+				      prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
+				      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
+	}
+	up_write(&current->mm->mmap_sem);
+
+	if (IS_ERR((void *) ret))
+		return ret;
+
+	if (!is_congruent) {
+		/* read the file contents */
+		inode = file->f_dentry->d_inode;
+		if (!inode->i_fop || !file->f_op->read
+		    || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff)
+			< 0))
+		{
+			sys_munmap(pstart, pend - pstart);
+			return -EINVAL;
+		}
+		if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
+			return -EINVAL;
+	}
+
+	if (!(flags & MAP_FIXED))
+		ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
+out:
+	return start;
+}
+
+#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
+
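+/*
+ * Widen IA-32 protection bits to what an x86 CPU would actually grant, since
+ * x86 page protections can't express write-only or exec-only mappings.
+ */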
+static inline unsigned int
+get_prot32 (unsigned int prot)
+{
+	if (prot & PROT_WRITE)
+		/* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */
+		prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
+	else if (prot & (PROT_READ | PROT_EXEC))
+		/* on x86, there is no distinction between PROT_READ and PROT_EXEC */
+		prot |= (PROT_READ | PROT_EXEC);
+
+	return prot;
+}
+
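+/*
+ * Common entry point for the mmap flavors.  When IA64 pages are bigger than
+ * IA-32 pages, all partial-page emulation (mmap/munmap/mprotect/mremap) is
+ * serialized by ia32_mmap_sem.
+ */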
+unsigned long
+ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
+	      loff_t offset)
+{
+	DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
+	    file, addr, len, prot, flags, offset);
+
+	if (file && (!file->f_op || !file->f_op->mmap))
+		return -ENODEV;
+
+	len = IA32_PAGE_ALIGN(len);
+	if (len == 0)
+		return addr;
+
+	if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
+	{
+		if (flags & MAP_FIXED)
+			return -ENOMEM;
+		else
+			return -EINVAL;
+	}
+
+	if (OFFSET4K(offset))
+		return -EINVAL;
+
+	prot = get_prot32(prot);
+
+#if PAGE_SHIFT > IA32_PAGE_SHIFT
+	down(&ia32_mmap_sem);
+	{
+		addr = emulate_mmap(file, addr, len, prot, flags, offset);
+	}
+	up(&ia32_mmap_sem);
+#else
+	down_write(&current->mm->mmap_sem);
+	{
+		addr = do_mmap(file, addr, len, prot, flags, offset);
+	}
+	up_write(&current->mm->mmap_sem);
+#endif
+	DBG("ia32_do_mmap: returning 0x%lx\n", addr);
+	return addr;
+}
+
+/*
+ * Linux/i386 historically couldn't handle more than 4 system call parameters,
+ * so these system calls pass their arguments through a memory block instead.
+ */
+
+struct mmap_arg_struct {
+	unsigned int addr;
+	unsigned int len;
+	unsigned int prot;
+	unsigned int flags;
+	unsigned int fd;
+	unsigned int offset;
+};
+
+asmlinkage long
+sys32_mmap (struct mmap_arg_struct __user *arg)
+{
+	struct mmap_arg_struct a;
+	struct file *file = NULL;
+	unsigned long addr;
+	int flags;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		return -EFAULT;
+
+	if (OFFSET4K(a.offset))
+		return -EINVAL;
+
+	flags = a.flags;
+
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	if (!(flags & MAP_ANONYMOUS)) {
+		file = fget(a.fd);
+		if (!file)
+			return -EBADF;
+	}
+
+	addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset);
+
+	if (file)
+		fput(file);
+	return addr;
+}
+
+asmlinkage long
+sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags,
+	     unsigned int fd, unsigned int pgoff)
+{
+	struct file *file = NULL;
+	unsigned long retval;
+
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	if (!(flags & MAP_ANONYMOUS)) {
+		file = fget(fd);
+		if (!file)
+			return -EBADF;
+	}
+
+	retval = ia32_do_mmap(file, addr, len, prot, flags,
+			      (unsigned long) pgoff << IA32_PAGE_SHIFT);
+
+	if (file)
+		fput(file);
+	return retval;
+}
+
+asmlinkage long
+sys32_munmap (unsigned int start, unsigned int len)
+{
+	unsigned int end = start + len;
+	long ret;
+
+#if PAGE_SHIFT <= IA32_PAGE_SHIFT
+	ret = sys_munmap(start, end - start);
+#else
+	if (OFFSET4K(start))
+		return -EINVAL;
+
+	end = IA32_PAGE_ALIGN(end);
+	if (start >= end)
+		return -EINVAL;
+
+	ret = ia32_unset_pp(&start, &end);
+	if (ret < 0)
+		return ret;
+
+	if (start >= end)
+		return 0;
+
+	down(&ia32_mmap_sem);
+	{
+		ret = sys_munmap(start, end - start);
+	}
+	up(&ia32_mmap_sem);
+#endif
+	return ret;
+}
+
+#if PAGE_SHIFT > IA32_PAGE_SHIFT
+
+/*
+ * When mprotect()ing a partial page, we set the permission to the union of the old
+ * settings and the new settings.  In other words, it's only possible to make access to a
+ * partial page less restrictive.
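+ * For example, mprotect()ing one 4KB piece of an IA64 page to PROT_WRITE
+ * while the page was PROT_READ leaves the whole page PROT_READ|PROT_WRITE.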
+ */
+static long
+mprotect_subpage (unsigned long address, int new_prot)
+{
+	int old_prot;
+	struct vm_area_struct *vma;
+
+	if (new_prot == PROT_NONE)
+		return 0;		/* optimize case where nothing changes... */
+	vma = find_vma(current->mm, address);
+	old_prot = get_page_prot(vma, address);
+	return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
+}
+
+#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
+
+asmlinkage long
+sys32_mprotect (unsigned int start, unsigned int len, int prot)
+{
+	unsigned int end = start + len;
+#if PAGE_SHIFT > IA32_PAGE_SHIFT
+	long retval = 0;
+#endif
+
+	prot = get_prot32(prot);
+
+#if PAGE_SHIFT <= IA32_PAGE_SHIFT
+	return sys_mprotect(start, end - start, prot);
+#else
+	if (OFFSET4K(start))
+		return -EINVAL;
+
+	end = IA32_PAGE_ALIGN(end);
+	if (end < start)
+		return -EINVAL;
+
+	retval = ia32_compare_pp(&start, &end);
+
+	if (retval < 0)
+		return retval;
+
+	down(&ia32_mmap_sem);
+	{
+		if (offset_in_page(start)) {
+			/* start address is 4KB aligned but not page aligned. */
+			retval = mprotect_subpage(PAGE_START(start), prot);
+			if (retval < 0)
+				goto out;
+
+			start = PAGE_ALIGN(start);
+			if (start >= end)
+				goto out;	/* retval is already zero... */
+		}
+
+		if (offset_in_page(end)) {
+			/* end address is 4KB aligned but not page aligned. */
+			retval = mprotect_subpage(PAGE_START(end), prot);
+			if (retval < 0)
+				goto out;
+
+			end = PAGE_START(end);
+		}
+		retval = sys_mprotect(start, end - start, prot);
+	}
+  out:
+	up(&ia32_mmap_sem);
+	return retval;
+#endif
+}
+
+asmlinkage long
+sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
+		unsigned int flags, unsigned int new_addr)
+{
+	long ret;
+
+#if PAGE_SHIFT <= IA32_PAGE_SHIFT
+	ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
+#else
+	unsigned int old_end, new_end;
+
+	if (OFFSET4K(addr))
+		return -EINVAL;
+
+	old_len = IA32_PAGE_ALIGN(old_len);
+	new_len = IA32_PAGE_ALIGN(new_len);
+	old_end = addr + old_len;
+	new_end = addr + new_len;
+
+	if (!new_len)
+		return -EINVAL;
+
+	if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr)))
+		return -EINVAL;
+
+	if (old_len >= new_len) {
+		ret = sys32_munmap(addr + new_len, old_len - new_len);
+		if (ret && old_len != new_len)
+			return ret;
+		ret = addr;
+		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
+			return ret;
+		old_len = new_len;
+	}
+
+	addr = PAGE_START(addr);
+	old_len = PAGE_ALIGN(old_end) - addr;
+	new_len = PAGE_ALIGN(new_end) - addr;
+
+	down(&ia32_mmap_sem);
+	{
+		ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
+	}
+	up(&ia32_mmap_sem);
+
+	if ((ret >= 0) && (old_len < new_len)) {
+		/* mremap expanded successfully */
+		ia32_set_pp(old_end, new_end, flags);
+	}
+#endif
+	return ret;
+}
+
+asmlinkage long
+sys32_pipe (int __user *fd)
+{
+	int retval;
+	int fds[2];
+
+	retval = do_pipe(fds);
+	if (retval)
+		goto out;
+	if (copy_to_user(fd, fds, sizeof(fds)))
+		retval = -EFAULT;
+  out:
+	return retval;
+}
+
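+/*
+ * Copy a 32-bit timeval in from/out to user space.  Both return non-zero on
+ * fault, in the style of __get_user()/__put_user().
+ */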
+static inline long
+get_tv32 (struct timeval *o, struct compat_timeval __user *i)
+{
+	return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
+		(__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec)));
+}
+
+static inline long
+put_tv32 (struct compat_timeval __user *o, struct timeval *i)
+{
+	return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
+		(__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec)));
+}
+
+asmlinkage unsigned long
+sys32_alarm (unsigned int seconds)
+{
+	struct itimerval it_new, it_old;
+	unsigned int oldalarm;
+
+	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
+	it_new.it_value.tv_sec = seconds;
+	it_new.it_value.tv_usec = 0;
+	do_setitimer(ITIMER_REAL, &it_new, &it_old);
+	oldalarm = it_old.it_value.tv_sec;
+	/* We can't return 0 if we have an alarm pending... */
+	/* ...and it's better to return too much than too little anyway. */
+	if (it_old.it_value.tv_usec)
+		oldalarm++;
+	return oldalarm;
+}
+
+/* Translations due to time_t size differences.  Which affects all
+   sorts of things, like timeval and itimerval.  */
+
+extern struct timezone sys_tz;
+
+asmlinkage long
+sys32_gettimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
+{
+	if (tv) {
+		struct timeval ktv;
+		do_gettimeofday(&ktv);
+		if (put_tv32(tv, &ktv))
+			return -EFAULT;
+	}
+	if (tz) {
+		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+asmlinkage long
+sys32_settimeofday (struct compat_timeval __user *tv, struct timezone __user *tz)
+{
+	struct timeval ktv;
+	struct timespec kts;
+	struct timezone ktz;
+
+	if (tv) {
+		if (get_tv32(&ktv, tv))
+			return -EFAULT;
+		kts.tv_sec = ktv.tv_sec;
+		kts.tv_nsec = ktv.tv_usec * 1000;
+	}
+	if (tz) {
+		if (copy_from_user(&ktz, tz, sizeof(ktz)))
+			return -EFAULT;
+	}
+
+	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
+}
+
+struct getdents32_callback {
+	struct compat_dirent __user *current_dir;
+	struct compat_dirent __user *previous;
+	int count;
+	int error;
+};
+
+struct readdir32_callback {
+	struct old_linux32_dirent __user * dirent;
+	int count;
+};
+
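+/*
+ * Callback for vfs_readdir(): pack one directory entry into the user's
+ * 32-bit compat_dirent buffer, fixing up d_off of the previous entry as we go.
+ */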
+static int
+filldir32 (void *__buf, const char *name, int namlen, loff_t offset, ino_t ino,
+	   unsigned int d_type)
+{
+	struct compat_dirent __user * dirent;
+	struct getdents32_callback * buf = (struct getdents32_callback *) __buf;
+	int reclen = ROUND_UP(offsetof(struct compat_dirent, d_name) + namlen + 1, 4);
+
+	buf->error = -EINVAL;	/* only used if we fail.. */
+	if (reclen > buf->count)
+		return -EINVAL;
+	buf->error = -EFAULT;	/* only used if we fail.. */
+	dirent = buf->previous;
+	if (dirent)
+		if (put_user(offset, &dirent->d_off))
+			return -EFAULT;
+	dirent = buf->current_dir;
+	buf->previous = dirent;
+	if (put_user(ino, &dirent->d_ino)
+	    || put_user(reclen, &dirent->d_reclen)
+	    || copy_to_user(dirent->d_name, name, namlen)
+	    || put_user(0, dirent->d_name + namlen))
+		return -EFAULT;
+	dirent = (struct compat_dirent __user *) ((char __user *) dirent + reclen);
+	buf->current_dir = dirent;
+	buf->count -= reclen;
+	return 0;
+}
+
+asmlinkage long
+sys32_getdents (unsigned int fd, struct compat_dirent __user *dirent, unsigned int count)
+{
+	struct file * file;
+	struct compat_dirent __user * lastdirent;
+	struct getdents32_callback buf;
+	int error;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	buf.current_dir = dirent;
+	buf.previous = NULL;
+	buf.count = count;
+	buf.error = 0;
+
+	error = vfs_readdir(file, filldir32, &buf);
+	if (error < 0)
+		goto out_putf;
+	error = buf.error;
+	lastdirent = buf.previous;
+	if (lastdirent) {
+		error = -EINVAL;
+		if (put_user(file->f_pos, &lastdirent->d_off))
+			goto out_putf;
+		error = count - buf.count;
+	}
+
+out_putf:
+	fput(file);
+out:
+	return error;
+}
+
+static int
+fillonedir32 (void * __buf, const char * name, int namlen, loff_t offset, ino_t ino,
+	      unsigned int d_type)
+{
+	struct readdir32_callback * buf = (struct readdir32_callback *) __buf;
+	struct old_linux32_dirent __user * dirent;
+
+	if (buf->count)
+		return -EINVAL;
+	buf->count++;
+	dirent = buf->dirent;
+	if (put_user(ino, &dirent->d_ino)
+	    || put_user(offset, &dirent->d_offset)
+	    || put_user(namlen, &dirent->d_namlen)
+	    || copy_to_user(dirent->d_name, name, namlen)
+	    || put_user(0, dirent->d_name + namlen))
+		return -EFAULT;
+	return 0;
+}
+
+asmlinkage long
+sys32_readdir (unsigned int fd, void __user *dirent, unsigned int count)
+{
+	int error;
+	struct file * file;
+	struct readdir32_callback buf;
+
+	error = -EBADF;
+	file = fget(fd);
+	if (!file)
+		goto out;
+
+	buf.count = 0;
+	buf.dirent = dirent;
+
+	error = vfs_readdir(file, fillonedir32, &buf);
+	if (error >= 0)
+		error = buf.count;
+	fput(file);
+out:
+	return error;
+}
+
+struct sel_arg_struct {
+	unsigned int n;
+	unsigned int inp;
+	unsigned int outp;
+	unsigned int exp;
+	unsigned int tvp;
+};
+
+asmlinkage long
+sys32_old_select (struct sel_arg_struct __user *arg)
+{
+	struct sel_arg_struct a;
+
+	if (copy_from_user(&a, arg, sizeof(a)))
+		return -EFAULT;
+	return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
+				 compat_ptr(a.exp), compat_ptr(a.tvp));
+}
+
+#define SEMOP		 1
+#define SEMGET		 2
+#define SEMCTL		 3
+#define SEMTIMEDOP	 4
+#define MSGSND		11
+#define MSGRCV		12
+#define MSGGET		13
+#define MSGCTL		14
+#define SHMAT		21
+#define SHMDT		22
+#define SHMGET		23
+#define SHMCTL		24
+
+asmlinkage long
+sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+{
+	int version;
+
+	version = call >> 16; /* hack for backward compatibility */
+	call &= 0xffff;
+
+	switch (call) {
+	      case SEMTIMEDOP:
+		if (fifth)
+			return compat_sys_semtimedop(first, compat_ptr(ptr),
+				second, compat_ptr(fifth));
+		/* else fall through for normal semop() */
+	      case SEMOP:
+		/* struct sembuf is the same on 32 and 64bit :)) */
+		return sys_semtimedop(first, compat_ptr(ptr), second,
+				      NULL);
+	      case SEMGET:
+		return sys_semget(first, second, third);
+	      case SEMCTL:
+		return compat_sys_semctl(first, second, third, compat_ptr(ptr));
+
+	      case MSGSND:
+		return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
+	      case MSGRCV:
+		return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr));
+	      case MSGGET:
+		return sys_msgget((key_t) first, second);
+	      case MSGCTL:
+		return compat_sys_msgctl(first, second, compat_ptr(ptr));
+
+	      case SHMAT:
+		return compat_sys_shmat(first, second, third, version, compat_ptr(ptr));
+	      case SHMDT:
+		return sys_shmdt(compat_ptr(ptr));
+	      case SHMGET:
+		return sys_shmget(first, (unsigned)second, third);
+	      case SHMCTL:
+		return compat_sys_shmctl(first, second, compat_ptr(ptr));
+
+	      default:
+		return -ENOSYS;
+	}
+	return -EINVAL;
+}
+
+asmlinkage long
+compat_sys_wait4 (compat_pid_t pid, compat_uint_t * stat_addr, int options,
+		 struct compat_rusage *ru);
+
+asmlinkage long
+sys32_waitpid (int pid, unsigned int *stat_addr, int options)
+{
+	return compat_sys_wait4(pid, stat_addr, options, NULL);
+}
+
+static unsigned int
+ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
+{
+	size_t copied;
+
+	copied = access_process_vm(child, addr, val, sizeof(*val), 0);
+	return (copied != sizeof(*val)) ? -EIO : 0;
+}
+
+static unsigned int
+ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val)
+{
+
+	if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
+		return -EIO;
+	return 0;
+}
+
+/*
+ *  The order in which registers are stored in the ptrace regs structure
+ */
+#define PT_EBX	0
+#define PT_ECX	1
+#define PT_EDX	2
+#define PT_ESI	3
+#define PT_EDI	4
+#define PT_EBP	5
+#define PT_EAX	6
+#define PT_DS	7
+#define PT_ES	8
+#define PT_FS	9
+#define PT_GS	10
+#define PT_ORIG_EAX 11
+#define PT_EIP	12
+#define PT_CS	13
+#define PT_EFL	14
+#define PT_UESP	15
+#define PT_SS	16
+
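+/*
+ * The ptrace "addr" used with PEEKUSR/POKEUSR is a byte offset into the
+ * layout above, so getreg()/putreg() divide by sizeof(int) to recover the
+ * register index.  The IA-32 register values themselves live in the ia64
+ * registers set up by the ia32 entry path (e.g. eax in r8, eip in cr_iip).
+ */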
+static unsigned int
+getreg (struct task_struct *child, int regno)
+{
+	struct pt_regs *child_regs;
+
+	child_regs = ia64_task_regs(child);
+	switch (regno / sizeof(int)) {
+	      case PT_EBX: return child_regs->r11;
+	      case PT_ECX: return child_regs->r9;
+	      case PT_EDX: return child_regs->r10;
+	      case PT_ESI: return child_regs->r14;
+	      case PT_EDI: return child_regs->r15;
+	      case PT_EBP: return child_regs->r13;
+	      case PT_EAX: return child_regs->r8;
+	      case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */
+	      case PT_EIP: return child_regs->cr_iip;
+	      case PT_UESP: return child_regs->r12;
+	      case PT_EFL: return child->thread.eflag;
+	      case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
+		return __USER_DS;
+	      case PT_CS: return __USER_CS;
+	      default:
+		printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno);
+		break;
+	}
+	return 0;
+}
+
+static void
+putreg (struct task_struct *child, int regno, unsigned int value)
+{
+	struct pt_regs *child_regs;
+
+	child_regs = ia64_task_regs(child);
+	switch (regno / sizeof(int)) {
+	      case PT_EBX: child_regs->r11 = value; break;
+	      case PT_ECX: child_regs->r9 = value; break;
+	      case PT_EDX: child_regs->r10 = value; break;
+	      case PT_ESI: child_regs->r14 = value; break;
+	      case PT_EDI: child_regs->r15 = value; break;
+	      case PT_EBP: child_regs->r13 = value; break;
+	      case PT_EAX: child_regs->r8 = value; break;
+	      case PT_ORIG_EAX: child_regs->r1 = value; break;
+	      case PT_EIP: child_regs->cr_iip = value; break;
+	      case PT_UESP: child_regs->r12 = value; break;
+	      case PT_EFL: child->thread.eflag = value; break;
+	      case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
+		if (value != __USER_DS)
+			printk(KERN_ERR
+			       "ia32.putreg: attempt to set invalid segment register %d = %x\n",
+			       regno, value);
+		break;
+	      case PT_CS:
+		if (value != __USER_CS)
+			printk(KERN_ERR
+			       "ia32.putreg: attempt to to set invalid segment register %d = %x\n",
+			       regno, value);
+		break;
+	      default:
+		printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno);
+		break;
+	}
+}
+
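+/*
+ * put_fpreg()/get_fpreg() transfer one 80-bit x87 stack register.  The caller
+ * passes an st() index; adding the top-of-stack (tos) yields the physical
+ * slot, with slots 0-3 living in pt_regs (f8-f11) and slots 4-7 in the
+ * switch_stack (f12-f15).
+ */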
+static void
+put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
+	   struct switch_stack *swp, int tos)
+{
+	struct _fpreg_ia32 *f;
+	char buf[32];
+
+	f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
+	if ((regno += tos) >= 8)
+		regno -= 8;
+	switch (regno) {
+	      case 0:
+		ia64f2ia32f(f, &ptp->f8);
+		break;
+	      case 1:
+		ia64f2ia32f(f, &ptp->f9);
+		break;
+	      case 2:
+		ia64f2ia32f(f, &ptp->f10);
+		break;
+	      case 3:
+		ia64f2ia32f(f, &ptp->f11);
+		break;
+	      case 4:
+	      case 5:
+	      case 6:
+	      case 7:
+		ia64f2ia32f(f, &swp->f12 + (regno - 4));
+		break;
+	}
+	copy_to_user(reg, f, sizeof(*reg));
+}
+
+static void
+get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
+	   struct switch_stack *swp, int tos)
+{
+
+	if ((regno += tos) >= 8)
+		regno -= 8;
+	switch (regno) {
+	      case 0:
+		copy_from_user(&ptp->f8, reg, sizeof(*reg));
+		break;
+	      case 1:
+		copy_from_user(&ptp->f9, reg, sizeof(*reg));
+		break;
+	      case 2:
+		copy_from_user(&ptp->f10, reg, sizeof(*reg));
+		break;
+	      case 3:
+		copy_from_user(&ptp->f11, reg, sizeof(*reg));
+		break;
+	      case 4:
+	      case 5:
+	      case 6:
+	      case 7:
+		copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
+		break;
+	}
+	return;
+}
+
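+/*
+ * Convert the task's ia64 floating-point state into the IA-32 user_i387
+ * layout: fcr/fsr/fir/fdr supply the control/status/tag words and the
+ * instruction/operand pointers, and the eight x87 stack registers are
+ * converted from their ia64 homes (f8-f15).
+ */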
+int
+save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
+{
+	struct switch_stack *swp;
+	struct pt_regs *ptp;
+	int i, tos;
+
+	if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
+		return -EFAULT;
+
+	__put_user(tsk->thread.fcr & 0xffff, &save->cwd);
+	__put_user(tsk->thread.fsr & 0xffff, &save->swd);
+	__put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
+	__put_user(tsk->thread.fir, &save->fip);
+	__put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
+	__put_user(tsk->thread.fdr, &save->foo);
+	__put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);
+
+	/*
+	 *  Stack frames start with 16 bytes of temp space
+	 */
+	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
+	ptp = ia64_task_regs(tsk);
+	tos = (tsk->thread.fsr >> 11) & 7;
+	for (i = 0; i < 8; i++)
+		put_fpreg(i, &save->st_space[i], ptp, swp, tos);
+	return 0;
+}
+
+static int
+restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
+{
+	struct switch_stack *swp;
+	struct pt_regs *ptp;
+	int i, tos;
+	unsigned int fsrlo, fsrhi, num32;
+
+	if (!access_ok(VERIFY_READ, save, sizeof(*save)))
+		return(-EFAULT);
+
+	__get_user(num32, (unsigned int __user *)&save->cwd);
+	tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
+	__get_user(fsrlo, (unsigned int __user *)&save->swd);
+	__get_user(fsrhi, (unsigned int __user *)&save->twd);
+	num32 = (fsrhi << 16) | fsrlo;
+	tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
+	__get_user(num32, (unsigned int __user *)&save->fip);
+	tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
+	__get_user(num32, (unsigned int __user *)&save->foo);
+	tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;
+
+	/*
+	 *  Stack frames start with 16 bytes of temp space
+	 */
+	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
+	ptp = ia64_task_regs(tsk);
+	tos = (tsk->thread.fsr >> 11) & 7;
+	for (i = 0; i < 8; i++)
+		get_fpreg(i, &save->st_space[i], ptp, swp, tos);
+	return 0;
+}
+
+int
+save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
+{
+	struct switch_stack *swp;
+	struct pt_regs *ptp;
+	int i, tos;
+	unsigned long mxcsr=0;
+	unsigned long num128[2];
+
+	if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
+		return -EFAULT;
+
+	__put_user(tsk->thread.fcr & 0xffff, &save->cwd);
+	__put_user(tsk->thread.fsr & 0xffff, &save->swd);
+	__put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
+	__put_user(tsk->thread.fir, &save->fip);
+	__put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
+	__put_user(tsk->thread.fdr, &save->foo);
+	__put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);
+
+	/*
+	 *  Stack frames start with 16 bytes of temp space
+	 */
+	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
+	ptp = ia64_task_regs(tsk);
+	tos = (tsk->thread.fsr >> 11) & 7;
+	for (i = 0; i < 8; i++)
+		put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
+
+	mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
+	__put_user(mxcsr & 0xffff, &save->mxcsr);
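+	/*
+	 * Each 128-bit XMM register is kept in a pair of ia64 FP registers
+	 * starting at f16 (xmm0 in {f16,f17}, xmm1 in {f18,f19}, ...).
+	 */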
+	for (i = 0; i < 8; i++) {
+		memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long));
+		memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long));
+		copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32));
+	}
+	return 0;
+}
+
+static int
+restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
+{
+	struct switch_stack *swp;
+	struct pt_regs *ptp;
+	int i, tos;
+	unsigned int fsrlo, fsrhi, num32;
+	int mxcsr;
+	unsigned long num64;
+	unsigned long num128[2];
+
+	if (!access_ok(VERIFY_READ, save, sizeof(*save)))
+		return(-EFAULT);
+
+	__get_user(num32, (unsigned int __user *)&save->cwd);
+	tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
+	__get_user(fsrlo, (unsigned int __user *)&save->swd);
+	__get_user(fsrhi, (unsigned int __user *)&save->twd);
+	num32 = (fsrhi << 16) | fsrlo;
+	tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
+	__get_user(num32, (unsigned int __user *)&save->fip);
+	tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
+	__get_user(num32, (unsigned int __user *)&save->foo);
+	tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;
+
+	/*
+	 *  Stack frames start with 16 bytes of temp space
+	 */
+	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
+	ptp = ia64_task_regs(tsk);
+	tos = (tsk->thread.fsr >> 11) & 7;
+	for (i = 0; i < 8; i++)
+		get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
+
+	__get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
+	num64 = mxcsr & 0xff10;
+	tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32);
+	num64 = mxcsr & 0x3f;
+	tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32);
+
+	for (i = 0; i < 8; i++) {
+		copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
+		memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long));
+		memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long));
+	}
+	return 0;
+}
+
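+/*
+ * 32-bit ptrace() emulation: requests that move data (PEEK/POKE, register
+ * and FP-state transfers) are translated here; everything else is passed
+ * through to the native sys_ptrace()/ptrace_request().
+ */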
+asmlinkage long
+sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
+{
+	struct task_struct *child;
+	unsigned int value, tmp;
+	long i, ret;
+
+	lock_kernel();
+	if (request == PTRACE_TRACEME) {
+		ret = sys_ptrace(request, pid, addr, data);
+		goto out;
+	}
+
+	ret = -ESRCH;
+	read_lock(&tasklist_lock);
+	child = find_task_by_pid(pid);
+	if (child)
+		get_task_struct(child);
+	read_unlock(&tasklist_lock);
+	if (!child)
+		goto out;
+	ret = -EPERM;
+	if (pid == 1)		/* no messing around with init! */
+		goto out_tsk;
+
+	if (request == PTRACE_ATTACH) {
+		ret = sys_ptrace(request, pid, addr, data);
+		goto out_tsk;
+	}
+
+	ret = ptrace_check_attach(child, request == PTRACE_KILL);
+	if (ret < 0)
+		goto out_tsk;
+
+	switch (request) {
+	      case PTRACE_PEEKTEXT:
+	      case PTRACE_PEEKDATA:	/* read word at location addr */
+		ret = ia32_peek(child, addr, &value);
+		if (ret == 0)
+			ret = put_user(value, (unsigned int __user *) compat_ptr(data));
+		else
+			ret = -EIO;
+		goto out_tsk;
+
+	      case PTRACE_POKETEXT:
+	      case PTRACE_POKEDATA:	/* write the word at location addr */
+		ret = ia32_poke(child, addr, data);
+		goto out_tsk;
+
+	      case PTRACE_PEEKUSR:	/* read word at addr in USER area */
+		ret = -EIO;
+		if ((addr & 3) || addr > 17*sizeof(int))
+			break;
+
+		tmp = getreg(child, addr);
+		if (!put_user(tmp, (unsigned int __user *) compat_ptr(data)))
+			ret = 0;
+		break;
+
+	      case PTRACE_POKEUSR:	/* write word at addr in USER area */
+		ret = -EIO;
+		if ((addr & 3) || addr > 17*sizeof(int))
+			break;
+
+		putreg(child, addr, data);
+		ret = 0;
+		break;
+
+	      case IA32_PTRACE_GETREGS:
+		if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) {
+			ret = -EIO;
+			break;
+		}
+		for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
+			put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data));
+			data += sizeof(int);
+		}
+		ret = 0;
+		break;
+
+	      case IA32_PTRACE_SETREGS:
+		if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) {
+			ret = -EIO;
+			break;
+		}
+		for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
+			get_user(tmp, (unsigned int __user *) compat_ptr(data));
+			putreg(child, i, tmp);
+			data += sizeof(int);
+		}
+		ret = 0;
+		break;
+
+	      case IA32_PTRACE_GETFPREGS:
+		ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
+					compat_ptr(data));
+		break;
+
+	      case IA32_PTRACE_GETFPXREGS:
+		ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
+					 compat_ptr(data));
+		break;
+
+	      case IA32_PTRACE_SETFPREGS:
+		ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
+					   compat_ptr(data));
+		break;
+
+	      case IA32_PTRACE_SETFPXREGS:
+		ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
+					    compat_ptr(data));
+		break;
+
+	      case PTRACE_GETEVENTMSG:
+		ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data));
+		break;
+
+	      case PTRACE_SYSCALL:	/* continue, stop after next syscall */
+	      case PTRACE_CONT:		/* restart after signal. */
+	      case PTRACE_KILL:
+	      case PTRACE_SINGLESTEP:	/* execute child for one instruction */
+	      case PTRACE_DETACH:	/* detach a process */
+		ret = sys_ptrace(request, pid, addr, data);
+		break;
+
+	      default:
+		ret = ptrace_request(child, request, addr, data);
+		break;
+
+	}
+  out_tsk:
+	put_task_struct(child);
+  out:
+	unlock_kernel();
+	return ret;
+}
+
+typedef struct {
+	unsigned int	ss_sp;
+	unsigned int	ss_flags;
+	unsigned int	ss_size;
+} ia32_stack_t;
+
+asmlinkage long
+sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
+		   long arg2, long arg3, long arg4, long arg5, long arg6,
+		   long arg7, struct pt_regs pt)
+{
+	stack_t uss, uoss;
+	ia32_stack_t buf32;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+
+	if (uss32) {
+		if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
+			return -EFAULT;
+		uss.ss_sp = (void __user *) (long) buf32.ss_sp;
+		uss.ss_flags = buf32.ss_flags;
+		/*
+		 * MINSIGSTKSZ is different for ia32 vs ia64.  We lie here to
+		 * pass the check and set it to the user-requested value later.
+		 */
+		if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		uss.ss_size = MINSIGSTKSZ;
+	}
+	set_fs(KERNEL_DS);
+	ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL,
+			     (stack_t __user *) &uoss, pt.r12);
+	if (ret == 0 && uss32)
+		current->sas_ss_size = buf32.ss_size;
+	set_fs(old_fs);
+out:
+	if (ret < 0)
+		return(ret);
+	if (uoss32) {
+		buf32.ss_sp = (unsigned int) (unsigned long) uoss.ss_sp;
+		buf32.ss_flags = uoss.ss_flags;
+		buf32.ss_size = uoss.ss_size;
+		if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t)))
+			return -EFAULT;
+	}
+	return ret;
+}
+
+asmlinkage int
+sys32_pause (void)
+{
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	return -ERESTARTNOHAND;
+}
+
+asmlinkage int
+sys32_msync (unsigned int start, unsigned int len, int flags)
+{
+	unsigned int addr;
+
+	if (OFFSET4K(start))
+		return -EINVAL;
+	addr = PAGE_START(start);
+	return sys_msync(addr, len + (start - addr), flags);
+}
+
+struct sysctl32 {
+	unsigned int	name;
+	int		nlen;
+	unsigned int	oldval;
+	unsigned int	oldlenp;
+	unsigned int	newval;
+	unsigned int	newlen;
+	unsigned int	__unused[4];
+};
+
+#ifdef CONFIG_SYSCTL
+asmlinkage long
+sys32_sysctl (struct sysctl32 __user *args)
+{
+	struct sysctl32 a32;
+	mm_segment_t old_fs = get_fs ();
+	void __user *oldvalp, *newvalp;
+	size_t oldlen;
+	int __user *namep;
+	long ret;
+
+	if (copy_from_user(&a32, args, sizeof(a32)))
+		return -EFAULT;
+
+	/*
+	 * We need to pre-validate these because we have to disable address checking
+	 * before calling do_sysctl() because of OLDLEN but we can't run the risk of the
+	 * user specifying bad addresses here.  Well, since we're dealing with 32 bit
+	 * addresses, we KNOW that access_ok() will always succeed, so this is an
+	 * expensive NOP, but so what...
+	 */
+	namep = (int __user *) compat_ptr(a32.name);
+	oldvalp = compat_ptr(a32.oldval);
+	newvalp = compat_ptr(a32.newval);
+
+	if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
+	    || !access_ok(VERIFY_WRITE, namep, 0)
+	    || !access_ok(VERIFY_WRITE, oldvalp, 0)
+	    || !access_ok(VERIFY_WRITE, newvalp, 0))
+		return -EFAULT;
+
+	set_fs(KERNEL_DS);
+	lock_kernel();
+	ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen,
+			newvalp, (size_t) a32.newlen);
+	unlock_kernel();
+	set_fs(old_fs);
+
+	if (oldvalp && put_user (oldlen, (int __user *) compat_ptr(a32.oldlenp)))
+		return -EFAULT;
+
+	return ret;
+}
+#endif
+
+asmlinkage long
+sys32_newuname (struct new_utsname __user *name)
+{
+	int ret = sys_newuname(name);
+
+	if (!ret)
+		if (copy_to_user(name->machine, "i686\0\0\0", 8))
+			ret = -EFAULT;
+	return ret;
+}
+
+asmlinkage long
+sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid)
+{
+	uid_t a, b, c;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+	ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c);
+	set_fs(old_fs);
+
+	if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid))
+		return -EFAULT;
+	return ret;
+}
+
+asmlinkage long
+sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
+{
+	gid_t a, b, c;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+	ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c);
+	set_fs(old_fs);
+
+	if (ret)
+		return ret;
+
+	return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid);
+}
+
+asmlinkage long
+sys32_lseek (unsigned int fd, int offset, unsigned int whence)
+{
+	/* Sign-extension of "offset" is important here... */
+	return sys_lseek(fd, offset, whence);
+}
+
+static int
+groups16_to_user(short __user *grouplist, struct group_info *group_info)
+{
+	int i;
+	short group;
+
+	for (i = 0; i < group_info->ngroups; i++) {
+		group = (short)GROUP_AT(group_info, i);
+		if (put_user(group, grouplist+i))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int
+groups16_from_user(struct group_info *group_info, short __user *grouplist)
+{
+	int i;
+	short group;
+
+	for (i = 0; i < group_info->ngroups; i++) {
+		if (get_user(group, grouplist+i))
+			return  -EFAULT;
+		GROUP_AT(group_info, i) = (gid_t)group;
+	}
+
+	return 0;
+}
+
+asmlinkage long
+sys32_getgroups16 (int gidsetsize, short __user *grouplist)
+{
+	int i;
+
+	if (gidsetsize < 0)
+		return -EINVAL;
+
+	get_group_info(current->group_info);
+	i = current->group_info->ngroups;
+	if (gidsetsize) {
+		if (i > gidsetsize) {
+			i = -EINVAL;
+			goto out;
+		}
+		if (groups16_to_user(grouplist, current->group_info)) {
+			i = -EFAULT;
+			goto out;
+		}
+	}
+out:
+	put_group_info(current->group_info);
+	return i;
+}
+
+asmlinkage long
+sys32_setgroups16 (int gidsetsize, short __user *grouplist)
+{
+	struct group_info *group_info;
+	int retval;
+
+	if (!capable(CAP_SETGID))
+		return -EPERM;
+	if ((unsigned)gidsetsize > NGROUPS_MAX)
+		return -EINVAL;
+
+	group_info = groups_alloc(gidsetsize);
+	if (!group_info)
+		return -ENOMEM;
+	retval = groups16_from_user(group_info, grouplist);
+	if (retval) {
+		put_group_info(group_info);
+		return retval;
+	}
+
+	retval = set_current_groups(group_info);
+	put_group_info(group_info);
+
+	return retval;
+}
+
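+/*
+ * 64-bit file sizes and offsets don't fit in a single IA-32 argument
+ * register, so they arrive split into lo/hi 32-bit halves.
+ */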
+asmlinkage long
+sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
+{
+	return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo);
+}
+
+asmlinkage long
+sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi)
+{
+	return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo);
+}
+
+static int
+putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf)
+{
+	int err;
+	u64 hdev;
+
+	if (clear_user(ubuf, sizeof(*ubuf)))
+		return -EFAULT;
+
+	hdev = huge_encode_dev(kbuf->dev);
+	err  = __put_user(hdev, (u32 __user*)&ubuf->st_dev);
+	err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1);
+	err |= __put_user(kbuf->ino, &ubuf->__st_ino);
+	err |= __put_user(kbuf->ino, &ubuf->st_ino_lo);
+	err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi);
+	err |= __put_user(kbuf->mode, &ubuf->st_mode);
+	err |= __put_user(kbuf->nlink, &ubuf->st_nlink);
+	err |= __put_user(kbuf->uid, &ubuf->st_uid);
+	err |= __put_user(kbuf->gid, &ubuf->st_gid);
+	hdev = huge_encode_dev(kbuf->rdev);
+	err |= __put_user(hdev, (u32 __user*)&ubuf->st_rdev);
+	err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1);
+	err |= __put_user(kbuf->size, &ubuf->st_size_lo);
+	err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi);
+	err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime);
+	err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec);
+	err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime);
+	err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec);
+	err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime);
+	err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec);
+	err |= __put_user(kbuf->blksize, &ubuf->st_blksize);
+	err |= __put_user(kbuf->blocks, &ubuf->st_blocks);
+	return err;
+}
+
+asmlinkage long
+sys32_stat64 (char __user *filename, struct stat64 __user *statbuf)
+{
+	struct kstat s;
+	long ret = vfs_stat(filename, &s);
+	if (!ret)
+		ret = putstat64(statbuf, &s);
+	return ret;
+}
+
+asmlinkage long
+sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf)
+{
+	struct kstat s;
+	long ret = vfs_lstat(filename, &s);
+	if (!ret)
+		ret = putstat64(statbuf, &s);
+	return ret;
+}
+
+asmlinkage long
+sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf)
+{
+	struct kstat s;
+	long ret = vfs_fstat(fd, &s);
+	if (!ret)
+		ret = putstat64(statbuf, &s);
+	return ret;
+}
+
+struct sysinfo32 {
+	s32 uptime;
+	u32 loads[3];
+	u32 totalram;
+	u32 freeram;
+	u32 sharedram;
+	u32 bufferram;
+	u32 totalswap;
+	u32 freeswap;
+	u16 procs;
+	u16 pad;
+	u32 totalhigh;
+	u32 freehigh;
+	u32 mem_unit;
+	char _f[8];
+};
+
+asmlinkage long
+sys32_sysinfo (struct sysinfo32 __user *info)
+{
+	struct sysinfo s;
+	long ret, err;
+	int bitcount = 0;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+	ret = sys_sysinfo((struct sysinfo __user *) &s);
+	set_fs(old_fs);
+	/* Check to see if any memory value is too large for 32-bit and
+	 * scale down if needed.
+	 */
+	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
+		while (s.mem_unit < PAGE_SIZE) {
+			s.mem_unit <<= 1;
+			bitcount++;
+		}
+		s.totalram >>= bitcount;
+		s.freeram >>= bitcount;
+		s.sharedram >>= bitcount;
+		s.bufferram >>= bitcount;
+		s.totalswap >>= bitcount;
+		s.freeswap >>= bitcount;
+		s.totalhigh >>= bitcount;
+		s.freehigh >>= bitcount;
+	}
+
+	if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
+		return -EFAULT;
+
+	err  = __put_user(s.uptime, &info->uptime);
+	err |= __put_user(s.loads[0], &info->loads[0]);
+	err |= __put_user(s.loads[1], &info->loads[1]);
+	err |= __put_user(s.loads[2], &info->loads[2]);
+	err |= __put_user(s.totalram, &info->totalram);
+	err |= __put_user(s.freeram, &info->freeram);
+	err |= __put_user(s.sharedram, &info->sharedram);
+	err |= __put_user(s.bufferram, &info->bufferram);
+	err |= __put_user(s.totalswap, &info->totalswap);
+	err |= __put_user(s.freeswap, &info->freeswap);
+	err |= __put_user(s.procs, &info->procs);
+	err |= __put_user (s.totalhigh, &info->totalhigh);
+	err |= __put_user (s.freehigh, &info->freehigh);
+	err |= __put_user (s.mem_unit, &info->mem_unit);
+	if (err)
+		return -EFAULT;
+	return ret;
+}
+
+asmlinkage long
+sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval)
+{
+	mm_segment_t old_fs = get_fs();
+	struct timespec t;
+	long ret;
+
+	set_fs(KERNEL_DS);
+	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
+	set_fs(old_fs);
+	if (put_compat_timespec(&t, interval))
+		return -EFAULT;
+	return ret;
+}
+
+asmlinkage long
+sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
+{
+	return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
+}
+
+asmlinkage long
+sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
+{
+	return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
+}
+
+asmlinkage long
+sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count)
+{
+	mm_segment_t old_fs = get_fs();
+	long ret;
+	off_t of;
+
+	if (offset && get_user(of, offset))
+		return -EFAULT;
+
+	set_fs(KERNEL_DS);
+	ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count);
+	set_fs(old_fs);
+
+	if (!ret && offset && put_user(of, offset))
+		return -EFAULT;
+
+	return ret;
+}
+
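+/*
+ * Keep an IA-32 task's view of its personality consistent: the kernel
+ * records PER_LINUX32, but the task itself should see plain PER_LINUX.
+ */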
+asmlinkage long
+sys32_personality (unsigned int personality)
+{
+	long ret;
+
+	if (current->personality == PER_LINUX32 && personality == PER_LINUX)
+		personality = PER_LINUX32;
+	ret = sys_personality(personality);
+	if (ret == PER_LINUX32)
+		ret = PER_LINUX;
+	return ret;
+}
+
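+/*
+ * sys_brk() works in full IA64 pages.  If the break moved down, zero the
+ * remainder of the page above the new break: it stays mapped, and a later
+ * upward brk over that range is expected to read as zero-filled memory.
+ */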
+asmlinkage unsigned long
+sys32_brk (unsigned int brk)
+{
+	unsigned long ret, obrk;
+	struct mm_struct *mm = current->mm;
+
+	obrk = mm->brk;
+	ret = sys_brk(brk);
+	if (ret < obrk)
+		clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret);
+	return ret;
+}
+
+/*
+ * Exactly like fs/open.c:sys_open(), except that it doesn't set the O_LARGEFILE flag.
+ */
+asmlinkage long
+sys32_open (const char __user * filename, int flags, int mode)
+{
+	char * tmp;
+	int fd, error;
+
+	tmp = getname(filename);
+	fd = PTR_ERR(tmp);
+	if (!IS_ERR(tmp)) {
+		fd = get_unused_fd();
+		if (fd >= 0) {
+			struct file *f = filp_open(tmp, flags, mode);
+			error = PTR_ERR(f);
+			if (IS_ERR(f))
+				goto out_error;
+			fd_install(fd, f);
+		}
+out:
+		putname(tmp);
+	}
+	return fd;
+
+out_error:
+	put_unused_fd(fd);
+	fd = error;
+	goto out;
+}
+
+/* Structure for ia32 emulation on ia64 */
+struct epoll_event32
+{
+	u32 events;
+	u32 data[2];
+};
+
+asmlinkage long
+sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event)
+{
+	mm_segment_t old_fs = get_fs();
+	struct epoll_event event64;
+	int error;
+	u32 data_halfword;
+
+	if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32)))
+		return -EFAULT;
+
+	__get_user(event64.events, &event->events);
+	__get_user(data_halfword, &event->data[0]);
+	event64.data = data_halfword;
+	__get_user(data_halfword, &event->data[1]);
+	event64.data |= (u64)data_halfword << 32;
+
+	set_fs(KERNEL_DS);
+	error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64);
+	set_fs(old_fs);
+
+	return error;
+}
+
+asmlinkage long
+sys32_epoll_wait(int epfd, struct epoll_event32 __user * events, int maxevents,
+		 int timeout)
+{
+	struct epoll_event *events64 = NULL;
+	mm_segment_t old_fs = get_fs();
+	int error, numevents, size;
+	int evt_idx;
+	int do_free_pages = 0;
+
+	if (maxevents <= 0) {
+		return -EINVAL;
+	}
+
+	/* Verify that the area passed by the user is writeable */
+	if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32)))
+		return -EFAULT;
+
+	/*
+	 * Allocate space for the intermediate copy.  If the space needed
+	 * is large enough to cause kmalloc to fail, then try again with
+	 * __get_free_pages.
+	 */
+	size = maxevents * sizeof(struct epoll_event);
+	events64 = kmalloc(size, GFP_KERNEL);
+	if (events64 == NULL) {
+		events64 = (struct epoll_event *)
+				__get_free_pages(GFP_KERNEL, get_order(size));
+		if (events64 == NULL)
+			return -ENOMEM;
+		do_free_pages = 1;
+	}
+
+	/* Do the system call */
+	set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem*/
+	numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64,
+				   maxevents, timeout);
+	set_fs(old_fs);
+
+	/* Don't modify userspace memory if we're returning an error */
+	if (numevents > 0) {
+		/* Translate the 64-bit structures back into the 32-bit
+		   structures */
+		for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
+			__put_user(events64[evt_idx].events,
+				   &events[evt_idx].events);
+			__put_user((u32)events64[evt_idx].data,
+				   &events[evt_idx].data[0]);
+			__put_user((u32)(events64[evt_idx].data >> 32),
+				   &events[evt_idx].data[1]);
+		}
+	}
+
+	if (do_free_pages)
+		free_pages((unsigned long) events64, get_order(size));
+	else
+		kfree(events64);
+	return numevents;
+}
+
+/*
+ * Get a yet unused TLS descriptor index.
+ */
+static int
+get_free_idx (void)
+{
+	struct thread_struct *t = &current->thread;
+	int idx;
+
+	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+		if (desc_empty(t->tls_array + idx))
+			return idx + GDT_ENTRY_TLS_MIN;
+	return -ESRCH;
+}
+
+/*
+ * Set a given TLS descriptor:
+ */
+asmlinkage int
+sys32_set_thread_area (struct ia32_user_desc __user *u_info)
+{
+	struct thread_struct *t = &current->thread;
+	struct ia32_user_desc info;
+	struct desc_struct *desc;
+	int cpu, idx;
+
+	if (copy_from_user(&info, u_info, sizeof(info)))
+		return -EFAULT;
+	idx = info.entry_number;
+
+	/*
+	 * index -1 means the kernel should try to find and allocate an empty descriptor:
+	 */
+	if (idx == -1) {
+		idx = get_free_idx();
+		if (idx < 0)
+			return idx;
+		if (put_user(idx, &u_info->entry_number))
+			return -EFAULT;
+	}
+
+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+		return -EINVAL;
+
+	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+	cpu = smp_processor_id();
+
+	if (LDT_empty(&info)) {
+		desc->a = 0;
+		desc->b = 0;
+	} else {
+		desc->a = LDT_entry_a(&info);
+		desc->b = LDT_entry_b(&info);
+	}
+	load_TLS(t, cpu);
+	return 0;
+}
+
+/*
+ * Get the current Thread-Local Storage area:
+ */
+
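+/*
+ * Helpers to pick apart an x86 segment descriptor: the 32-bit base and
+ * 20-bit limit are scattered across the two descriptor words ("a" is the low
+ * word, "b" the high word), along with the attribute bits.
+ */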
+#define GET_BASE(desc) (			\
+	(((desc)->a >> 16) & 0x0000ffff) |	\
+	(((desc)->b << 16) & 0x00ff0000) |	\
+	( (desc)->b        & 0xff000000)   )
+
+#define GET_LIMIT(desc) (			\
+	((desc)->a & 0x0ffff) |			\
+	 ((desc)->b & 0xf0000) )
+
+#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
+#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
+#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
+#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
+#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
+#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
+
+asmlinkage int
+sys32_get_thread_area (struct ia32_user_desc __user *u_info)
+{
+	struct ia32_user_desc info;
+	struct desc_struct *desc;
+	int idx;
+
+	if (get_user(idx, &u_info->entry_number))
+		return -EFAULT;
+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+		return -EINVAL;
+
+	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+	info.entry_number = idx;
+	info.base_addr = GET_BASE(desc);
+	info.limit = GET_LIMIT(desc);
+	info.seg_32bit = GET_32BIT(desc);
+	info.contents = GET_CONTENTS(desc);
+	info.read_exec_only = !GET_WRITABLE(desc);
+	info.limit_in_pages = GET_LIMIT_PAGES(desc);
+	info.seg_not_present = !GET_PRESENT(desc);
+	info.useable = GET_USEABLE(desc);
+
+	if (copy_to_user(u_info, &info, sizeof(info)))
+		return -EFAULT;
+	return 0;
+}
+
+asmlinkage long
+sys32_timer_create(u32 clock, struct compat_sigevent __user *se32, timer_t __user *timer_id)
+{
+	struct sigevent se;
+	mm_segment_t oldfs;
+	timer_t t;
+	long err;
+
+	if (se32 == NULL)
+		return sys_timer_create(clock, NULL, timer_id);
+
+	if (get_compat_sigevent(&se, se32))
+		return -EFAULT;
+
+	if (!access_ok(VERIFY_WRITE,timer_id,sizeof(timer_t)))
+		return -EFAULT;
+
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	err = sys_timer_create(clock, (struct sigevent __user *) &se, (timer_t __user *) &t);
+	set_fs(oldfs);
+
+	if (!err)
+		err = __put_user (t, timer_id);
+
+	return err;
+}
+
+long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
+			__u32 len_low, __u32 len_high, int advice)
+{
+	return sys_fadvise64_64(fd,
+			       (((u64)offset_high)<<32) | offset_low,
+			       (((u64)len_high)<<32) | len_low,
+			       advice);
+}
+
+#ifdef	NOTYET  /* UNTESTED FOR IA64 FROM HERE DOWN */
+
+asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid)
+{
+	uid_t sruid, seuid;
+
+	sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
+	seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
+	return sys_setreuid(sruid, seuid);
+}
+
+asmlinkage long
+sys32_setresuid(compat_uid_t ruid, compat_uid_t euid,
+		compat_uid_t suid)
+{
+	uid_t sruid, seuid, ssuid;
+
+	sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
+	seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
+	ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid);
+	return sys_setresuid(sruid, seuid, ssuid);
+}
+
+asmlinkage long
+sys32_setregid(compat_gid_t rgid, compat_gid_t egid)
+{
+	gid_t srgid, segid;
+
+	srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
+	segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
+	return sys_setregid(srgid, segid);
+}
+
+asmlinkage long
+sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
+		compat_gid_t sgid)
+{
+	gid_t srgid, segid, ssgid;
+
+	srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
+	segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
+	ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
+	return sys_setresgid(srgid, segid, ssgid);
+}
+
+/* Handle adjtimex compatibility. */
+
+struct timex32 {
+	u32 modes;
+	s32 offset, freq, maxerror, esterror;
+	s32 status, constant, precision, tolerance;
+	struct compat_timeval time;
+	s32 tick;
+	s32 ppsfreq, jitter, shift, stabil;
+	s32 jitcnt, calcnt, errcnt, stbcnt;
+	s32  :32; s32  :32; s32  :32; s32  :32;
+	s32  :32; s32  :32; s32  :32; s32  :32;
+	s32  :32; s32  :32; s32  :32; s32  :32;
+};
+
+extern int do_adjtimex(struct timex *);
+
+asmlinkage long
+sys32_adjtimex(struct timex32 *utp)
+{
+	struct timex txc;
+	int ret;
+
+	memset(&txc, 0, sizeof(struct timex));
+
+	if(get_user(txc.modes, &utp->modes) ||
+	   __get_user(txc.offset, &utp->offset) ||
+	   __get_user(txc.freq, &utp->freq) ||
+	   __get_user(txc.maxerror, &utp->maxerror) ||
+	   __get_user(txc.esterror, &utp->esterror) ||
+	   __get_user(txc.status, &utp->status) ||
+	   __get_user(txc.constant, &utp->constant) ||
+	   __get_user(txc.precision, &utp->precision) ||
+	   __get_user(txc.tolerance, &utp->tolerance) ||
+	   __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+	   __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+	   __get_user(txc.tick, &utp->tick) ||
+	   __get_user(txc.ppsfreq, &utp->ppsfreq) ||
+	   __get_user(txc.jitter, &utp->jitter) ||
+	   __get_user(txc.shift, &utp->shift) ||
+	   __get_user(txc.stabil, &utp->stabil) ||
+	   __get_user(txc.jitcnt, &utp->jitcnt) ||
+	   __get_user(txc.calcnt, &utp->calcnt) ||
+	   __get_user(txc.errcnt, &utp->errcnt) ||
+	   __get_user(txc.stbcnt, &utp->stbcnt))
+		return -EFAULT;
+
+	ret = do_adjtimex(&txc);
+
+	if(put_user(txc.modes, &utp->modes) ||
+	   __put_user(txc.offset, &utp->offset) ||
+	   __put_user(txc.freq, &utp->freq) ||
+	   __put_user(txc.maxerror, &utp->maxerror) ||
+	   __put_user(txc.esterror, &utp->esterror) ||
+	   __put_user(txc.status, &utp->status) ||
+	   __put_user(txc.constant, &utp->constant) ||
+	   __put_user(txc.precision, &utp->precision) ||
+	   __put_user(txc.tolerance, &utp->tolerance) ||
+	   __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+	   __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+	   __put_user(txc.tick, &utp->tick) ||
+	   __put_user(txc.ppsfreq, &utp->ppsfreq) ||
+	   __put_user(txc.jitter, &utp->jitter) ||
+	   __put_user(txc.shift, &utp->shift) ||
+	   __put_user(txc.stabil, &utp->stabil) ||
+	   __put_user(txc.jitcnt, &utp->jitcnt) ||
+	   __put_user(txc.calcnt, &utp->calcnt) ||
+	   __put_user(txc.errcnt, &utp->errcnt) ||
+	   __put_user(txc.stbcnt, &utp->stbcnt))
+		ret = -EFAULT;
+
+	return ret;
+}
+#endif /* NOTYET */