Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/arch/m68k/ifpsp060/src/README-SRC b/arch/m68k/ifpsp060/src/README-SRC
new file mode 100644
index 0000000..6be5cff
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/README-SRC
@@ -0,0 +1,12 @@
+This is the original source code from Motorola for the 68060 processor
+support code, providing emulation for rarely used m68k instructions
+not implemented in the 68060 silicon.
+
+The code provided here will not assemble out of the box using the GNU
+assembler; however, it is included in order to comply with the
+GNU General Public License.
+
+You don't need to actually assemble these files in order to compile a
+working m68k kernel; the precompiled .sa files in arch/m68k/ifpsp060
+are sufficient and were generated from these source files by
+Motorola.
diff --git a/arch/m68k/ifpsp060/src/fplsp.S b/arch/m68k/ifpsp060/src/fplsp.S
new file mode 100644
index 0000000..fdb79b9
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/fplsp.S
@@ -0,0 +1,10980 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OUT OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# lfptop.s:
+#	This file is appended to the top of the 060FPLSP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located here.
+#
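+# Illustrative note (not part of the original Motorola source): each
+# table entry below occupies eight bytes -- a six-byte "bra.l" plus the
+# two-byte "short 0x0000" pad -- so the Nth routine sits at offset N*8
+# from the start of the package. For example, _facoss_ is reached at
+# offset 0x00 and _facosd_ at 0x08; with the package base in %a1, a
+# caller could enter _facosd_ with something like:
+#
+#	lea	0x8(%a1),%a0	# address of the _facosd_ table entry
+#	jsr	(%a0)		# enter the package through the table
+#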
+
+	bra.l	_facoss_
+	short	0x0000
+	bra.l	_facosd_
+	short	0x0000
+	bra.l	_facosx_
+	short	0x0000
+
+	bra.l	_fasins_
+	short	0x0000
+	bra.l	_fasind_
+	short	0x0000
+	bra.l	_fasinx_
+	short	0x0000
+
+	bra.l	_fatans_
+	short	0x0000
+	bra.l	_fatand_
+	short	0x0000
+	bra.l	_fatanx_
+	short	0x0000
+
+	bra.l	_fatanhs_
+	short	0x0000
+	bra.l	_fatanhd_
+	short	0x0000
+	bra.l	_fatanhx_
+	short	0x0000
+
+	bra.l	_fcoss_
+	short	0x0000
+	bra.l	_fcosd_
+	short	0x0000
+	bra.l	_fcosx_
+	short	0x0000
+
+	bra.l	_fcoshs_
+	short	0x0000
+	bra.l	_fcoshd_
+	short	0x0000
+	bra.l	_fcoshx_
+	short	0x0000
+
+	bra.l	_fetoxs_
+	short	0x0000
+	bra.l	_fetoxd_
+	short	0x0000
+	bra.l	_fetoxx_
+	short	0x0000
+
+	bra.l	_fetoxm1s_
+	short	0x0000
+	bra.l	_fetoxm1d_
+	short	0x0000
+	bra.l	_fetoxm1x_
+	short	0x0000
+
+	bra.l	_fgetexps_
+	short	0x0000
+	bra.l	_fgetexpd_
+	short	0x0000
+	bra.l	_fgetexpx_
+	short	0x0000
+
+	bra.l	_fgetmans_
+	short	0x0000
+	bra.l	_fgetmand_
+	short	0x0000
+	bra.l	_fgetmanx_
+	short	0x0000
+
+	bra.l	_flog10s_
+	short	0x0000
+	bra.l	_flog10d_
+	short	0x0000
+	bra.l	_flog10x_
+	short	0x0000
+
+	bra.l	_flog2s_
+	short	0x0000
+	bra.l	_flog2d_
+	short	0x0000
+	bra.l	_flog2x_
+	short	0x0000
+
+	bra.l	_flogns_
+	short	0x0000
+	bra.l	_flognd_
+	short	0x0000
+	bra.l	_flognx_
+	short	0x0000
+
+	bra.l	_flognp1s_
+	short	0x0000
+	bra.l	_flognp1d_
+	short	0x0000
+	bra.l	_flognp1x_
+	short	0x0000
+
+	bra.l	_fmods_
+	short	0x0000
+	bra.l	_fmodd_
+	short	0x0000
+	bra.l	_fmodx_
+	short	0x0000
+
+	bra.l	_frems_
+	short	0x0000
+	bra.l	_fremd_
+	short	0x0000
+	bra.l	_fremx_
+	short	0x0000
+
+	bra.l	_fscales_
+	short	0x0000
+	bra.l	_fscaled_
+	short	0x0000
+	bra.l	_fscalex_
+	short	0x0000
+
+	bra.l	_fsins_
+	short	0x0000
+	bra.l	_fsind_
+	short	0x0000
+	bra.l	_fsinx_
+	short	0x0000
+
+	bra.l	_fsincoss_
+	short	0x0000
+	bra.l	_fsincosd_
+	short	0x0000
+	bra.l	_fsincosx_
+	short	0x0000
+
+	bra.l	_fsinhs_
+	short	0x0000
+	bra.l	_fsinhd_
+	short	0x0000
+	bra.l	_fsinhx_
+	short	0x0000
+
+	bra.l	_ftans_
+	short	0x0000
+	bra.l	_ftand_
+	short	0x0000
+	bra.l	_ftanx_
+	short	0x0000
+
+	bra.l	_ftanhs_
+	short	0x0000
+	bra.l	_ftanhd_
+	short	0x0000
+	bra.l	_ftanhx_
+	short	0x0000
+
+	bra.l	_ftentoxs_
+	short	0x0000
+	bra.l	_ftentoxd_
+	short	0x0000
+	bra.l	_ftentoxx_
+	short	0x0000
+
+	bra.l	_ftwotoxs_
+	short	0x0000
+	bra.l	_ftwotoxd_
+	short	0x0000
+	bra.l	_ftwotoxx_
+	short	0x0000
+
+	bra.l	_fabss_
+	short	0x0000
+	bra.l	_fabsd_
+	short	0x0000
+	bra.l	_fabsx_
+	short	0x0000
+
+	bra.l	_fadds_
+	short	0x0000
+	bra.l	_faddd_
+	short	0x0000
+	bra.l	_faddx_
+	short	0x0000
+
+	bra.l	_fdivs_
+	short	0x0000
+	bra.l	_fdivd_
+	short	0x0000
+	bra.l	_fdivx_
+	short	0x0000
+
+	bra.l	_fints_
+	short	0x0000
+	bra.l	_fintd_
+	short	0x0000
+	bra.l	_fintx_
+	short	0x0000
+
+	bra.l	_fintrzs_
+	short	0x0000
+	bra.l	_fintrzd_
+	short	0x0000
+	bra.l	_fintrzx_
+	short	0x0000
+
+	bra.l	_fmuls_
+	short	0x0000
+	bra.l	_fmuld_
+	short	0x0000
+	bra.l	_fmulx_
+	short	0x0000
+
+	bra.l	_fnegs_
+	short	0x0000
+	bra.l	_fnegd_
+	short	0x0000
+	bra.l	_fnegx_
+	short	0x0000
+
+	bra.l	_fsqrts_
+	short	0x0000
+	bra.l	_fsqrtd_
+	short	0x0000
+	bra.l	_fsqrtx_
+	short	0x0000
+
+	bra.l	_fsubs_
+	short	0x0000
+	bra.l	_fsubd_
+	short	0x0000
+	bra.l	_fsubx_
+	short	0x0000
+
+# leave room for future possible additions
+	align	0x400
+
+#
+# This file contains a set of define statements for constants
+# in order to promote readability within the corecode itself.
+#
+
+set LOCAL_SIZE,		192			# stack frame size (bytes)
+set LV,			-LOCAL_SIZE		# stack offset
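+
+# Illustrative note (not part of the original source): each entry point
+# below executes "link %a6,&-LOCAL_SIZE", allocating a 192-byte scratch
+# frame beneath the frame pointer. LV is therefore -192, and every
+# "LV+n" symbol names byte n of that frame, addressed off %a6; e.g.
+# USER_FPSR = LV+36 = -156(%a6).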
+
+set EXC_SR,		0x4			# stack status register
+set EXC_PC,		0x6			# stack pc
+set EXC_VOFF,		0xa			# stacked vector offset
+set EXC_EA,		0xc			# stacked <ea>
+
+set EXC_FP,		0x0			# frame pointer
+
+set EXC_AREGS,		-68			# offset of all address regs
+set EXC_DREGS,		-100			# offset of all data regs
+set EXC_FPREGS,		-36			# offset of all fp regs
+
+set EXC_A7,		EXC_AREGS+(7*4)		# offset of saved a7
+set OLD_A7,		EXC_AREGS+(6*4)		# extra copy of saved a7
+set EXC_A6,		EXC_AREGS+(6*4)		# offset of saved a6
+set EXC_A5,		EXC_AREGS+(5*4)
+set EXC_A4,		EXC_AREGS+(4*4)
+set EXC_A3,		EXC_AREGS+(3*4)
+set EXC_A2,		EXC_AREGS+(2*4)
+set EXC_A1,		EXC_AREGS+(1*4)
+set EXC_A0,		EXC_AREGS+(0*4)
+set EXC_D7,		EXC_DREGS+(7*4)
+set EXC_D6,		EXC_DREGS+(6*4)
+set EXC_D5,		EXC_DREGS+(5*4)
+set EXC_D4,		EXC_DREGS+(4*4)
+set EXC_D3,		EXC_DREGS+(3*4)
+set EXC_D2,		EXC_DREGS+(2*4)
+set EXC_D1,		EXC_DREGS+(1*4)
+set EXC_D0,		EXC_DREGS+(0*4)
+
+set EXC_FP0,		EXC_FPREGS+(0*12)	# offset of saved fp0
+set EXC_FP1,		EXC_FPREGS+(1*12)	# offset of saved fp1
+set EXC_FP2,		EXC_FPREGS+(2*12)	# offset of saved fp2 (not used)
+
+set FP_SCR1,		LV+80			# fp scratch 1
+set FP_SCR1_EX,		FP_SCR1+0
+set FP_SCR1_SGN,	FP_SCR1+2
+set FP_SCR1_HI,		FP_SCR1+4
+set FP_SCR1_LO,		FP_SCR1+8
+
+set FP_SCR0,		LV+68			# fp scratch 0
+set FP_SCR0_EX,		FP_SCR0+0
+set FP_SCR0_SGN,	FP_SCR0+2
+set FP_SCR0_HI,		FP_SCR0+4
+set FP_SCR0_LO,		FP_SCR0+8
+
+set FP_DST,		LV+56			# fp destination operand
+set FP_DST_EX,		FP_DST+0
+set FP_DST_SGN,		FP_DST+2
+set FP_DST_HI,		FP_DST+4
+set FP_DST_LO,		FP_DST+8
+
+set FP_SRC,		LV+44			# fp source operand
+set FP_SRC_EX,		FP_SRC+0
+set FP_SRC_SGN,		FP_SRC+2
+set FP_SRC_HI,		FP_SRC+4
+set FP_SRC_LO,		FP_SRC+8
+
+set USER_FPIAR,		LV+40			# FP instr address register
+
+set USER_FPSR,		LV+36			# FP status register
+set FPSR_CC,		USER_FPSR+0		# FPSR condition codes
+set FPSR_QBYTE,		USER_FPSR+1		# FPSR quotient byte
+set FPSR_EXCEPT,	USER_FPSR+2		# FPSR exception status byte
+set FPSR_AEXCEPT,	USER_FPSR+3		# FPSR accrued exception byte
+
+set USER_FPCR,		LV+32			# FP control register
+set FPCR_ENABLE,	USER_FPCR+2		# FPCR exception enable
+set FPCR_MODE,		USER_FPCR+3		# FPCR rounding mode control
+
+set L_SCR3,		LV+28			# integer scratch 3
+set L_SCR2,		LV+24			# integer scratch 2
+set L_SCR1,		LV+20			# integer scratch 1
+
+set STORE_FLG,		LV+19			# flag: operand store (i.e. not fcmp/ftst)
+
+set EXC_TEMP2,		LV+24			# temporary space
+set EXC_TEMP,		LV+16			# temporary space
+
+set DTAG,		LV+15			# destination operand type
+set STAG,		LV+14			# source operand type
+
+set SPCOND_FLG,		LV+10			# flag: special case (see below)
+
+set EXC_CC,		LV+8			# saved condition codes
+set EXC_EXTWPTR,	LV+4			# saved current PC (active)
+set EXC_EXTWORD,	LV+2			# saved extension word
+set EXC_CMDREG,		LV+2			# saved extension word
+set EXC_OPWORD,		LV+0			# saved operation word
+
+################################
+
+# Helpful macros
+
+set FTEMP,		0			# offsets within an
+set FTEMP_EX,		0			# extended precision
+set FTEMP_SGN,		2			# value saved in memory.
+set FTEMP_HI,		4
+set FTEMP_LO,		8
+set FTEMP_GRS,		12
+
+set LOCAL,		0			# offsets within an
+set LOCAL_EX,		0			# extended precision
+set LOCAL_SGN,		2			# value saved in memory.
+set LOCAL_HI,		4
+set LOCAL_LO,		8
+set LOCAL_GRS,		12
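+
+# Layout sketch (illustrative, not in the original source): an extended
+# value saved in memory by this package occupies 12 bytes, plus a
+# guard/round/sticky longword in working copies:
+#	+0	sign and 15-bit exponent word	(..._EX)
+#	+2	scratch word holding the sign	(..._SGN)
+#	+4	mantissa high longword		(..._HI)
+#	+8	mantissa low longword		(..._LO)
+#	+12	guard/round/sticky longword	(..._GRS)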
+
+set DST,		0			# offsets within an
+set DST_EX,		0			# extended precision
+set DST_HI,		4			# value saved in memory.
+set DST_LO,		8
+
+set SRC,		0			# offsets within an
+set SRC_EX,		0			# extended precision
+set SRC_HI,		4			# value saved in memory.
+set SRC_LO,		8
+
+set SGL_LO,		0x3f81			# min sgl prec exponent
+set SGL_HI,		0x407e			# max sgl prec exponent
+set DBL_LO,		0x3c01			# min dbl prec exponent
+set DBL_HI,		0x43fe			# max dbl prec exponent
+set EXT_LO,		0x0			# min ext prec exponent
+set EXT_HI,		0x7ffe			# max ext prec exponent
+
+set EXT_BIAS,		0x3fff			# extended precision bias
+set SGL_BIAS,		0x007f			# single precision bias
+set DBL_BIAS,		0x03ff			# double precision bias
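+
+# Worked example (illustrative, not in the original source): the range
+# constants above are the IEEE single/double exponent ranges rebiased
+# into extended precision, i.e. ext_exp = ieee_exp - bias + EXT_BIAS:
+#	min sgl: 0x001 - 0x7f  + 0x3fff = 0x3f81 = SGL_LO
+#	max sgl: 0x0fe - 0x7f  + 0x3fff = 0x407e = SGL_HI
+#	min dbl: 0x001 - 0x3ff + 0x3fff = 0x3c01 = DBL_LO
+#	max dbl: 0x7fe - 0x3ff + 0x3fff = 0x43fe = DBL_HI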
+
+set NORM,		0x00			# operand type for STAG/DTAG
+set ZERO,		0x01			# operand type for STAG/DTAG
+set INF,		0x02			# operand type for STAG/DTAG
+set QNAN,		0x03			# operand type for STAG/DTAG
+set DENORM,		0x04			# operand type for STAG/DTAG
+set SNAN,		0x05			# operand type for STAG/DTAG
+set UNNORM,		0x06			# operand type for STAG/DTAG
+
+##################
+# FPSR/FPCR bits #
+##################
+set neg_bit,		0x3			# negative result
+set z_bit,		0x2			# zero result
+set inf_bit,		0x1			# infinite result
+set nan_bit,		0x0			# NAN result
+
+set q_sn_bit,		0x7			# sign bit of quotient byte
+
+set bsun_bit,		7			# branch on unordered
+set snan_bit,		6			# signalling NAN
+set operr_bit,		5			# operand error
+set ovfl_bit,		4			# overflow
+set unfl_bit,		3			# underflow
+set dz_bit,		2			# divide by zero
+set inex2_bit,		1			# inexact result 2
+set inex1_bit,		0			# inexact result 1
+
+set aiop_bit,		7			# accrued inexact operation bit
+set aovfl_bit,		6			# accrued overflow bit
+set aunfl_bit,		5			# accrued underflow bit
+set adz_bit,		4			# accrued dz bit
+set ainex_bit,		3			# accrued inexact bit
+
+#############################
+# FPSR individual bit masks #
+#############################
+set neg_mask,		0x08000000		# negative bit mask (lw)
+set inf_mask,		0x02000000		# infinity bit mask (lw)
+set z_mask,		0x04000000		# zero bit mask (lw)
+set nan_mask,		0x01000000		# nan bit mask (lw)
+
+set neg_bmask,		0x08			# negative bit mask (byte)
+set inf_bmask,		0x02			# infinity bit mask (byte)
+set z_bmask,		0x04			# zero bit mask (byte)
+set nan_bmask,		0x01			# nan bit mask (byte)
+
+set bsun_mask,		0x00008000		# bsun exception mask
+set snan_mask,		0x00004000		# snan exception mask
+set operr_mask,		0x00002000		# operr exception mask
+set ovfl_mask,		0x00001000		# overflow exception mask
+set unfl_mask,		0x00000800		# underflow exception mask
+set dz_mask,		0x00000400		# dz exception mask
+set inex2_mask,		0x00000200		# inex2 exception mask
+set inex1_mask,		0x00000100		# inex1 exception mask
+
+set aiop_mask,		0x00000080		# accrued illegal operation
+set aovfl_mask,		0x00000040		# accrued overflow
+set aunfl_mask,		0x00000020		# accrued underflow
+set adz_mask,		0x00000010		# accrued divide by zero
+set ainex_mask,		0x00000008		# accrued inexact
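+
+# Consistency note (illustrative, not in the original source): the
+# longword masks follow from the bit numbers above and the FPSR layout
+# (condition codes in bits 31-24, quotient in 23-16, exception status
+# in 15-8, accrued exceptions in 7-0). For example,
+# neg_mask = 1<<(24+neg_bit) = 0x08000000 and
+# bsun_mask = 1<<(8+bsun_bit) = 0x00008000.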
+
+######################################
+# FPSR combinations used in the FPSP #
+######################################
+set dzinf_mask,		inf_mask+dz_mask+adz_mask
+set opnan_mask,		nan_mask+operr_mask+aiop_mask
+set nzi_mask,		0x01ffffff		# clears N, Z, and I
+set unfinx_mask,	unfl_mask+inex2_mask+aunfl_mask+ainex_mask
+set unf2inx_mask,	unfl_mask+inex2_mask+ainex_mask
+set ovfinx_mask,	ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
+set inx1a_mask,		inex1_mask+ainex_mask
+set inx2a_mask,		inex2_mask+ainex_mask
+set snaniop_mask,	nan_mask+snan_mask+aiop_mask
+set snaniop2_mask,	snan_mask+aiop_mask
+set naniop_mask,	nan_mask+aiop_mask
+set neginf_mask,	neg_mask+inf_mask
+set infaiop_mask,	inf_mask+aiop_mask
+set negz_mask,		neg_mask+z_mask
+set opaop_mask,		operr_mask+aiop_mask
+set unfl_inx_mask,	unfl_mask+aunfl_mask+ainex_mask
+set ovfl_inx_mask,	ovfl_mask+aovfl_mask+ainex_mask
+
+#########
+# misc. #
+#########
+set rnd_stky_bit,	29			# stky bit pos in longword
+
+set sign_bit,		0x7			# sign bit
+set signan_bit,		0x6			# signalling nan bit
+
+set sgl_thresh,		0x3f81			# minimum sgl exponent
+set dbl_thresh,		0x3c01			# minimum dbl exponent
+
+set x_mode,		0x0			# extended precision
+set s_mode,		0x4			# single precision
+set d_mode,		0x8			# double precision
+
+set rn_mode,		0x0			# round-to-nearest
+set rz_mode,		0x1			# round-to-zero
+set rm_mode,		0x2			# round-to-minus-infinity
+set rp_mode,		0x3			# round-to-plus-infinity
+
+set mantissalen,	64			# length of mantissa in bits
+
+set BYTE,		1			# len(byte) == 1 byte
+set WORD,		2			# len(word) == 2 bytes
+set LONG,		4			# len(longword) == 4 bytes
+
+set BSUN_VEC,		0xc0			# bsun    vector offset
+set INEX_VEC,		0xc4			# inexact vector offset
+set DZ_VEC,		0xc8			# dz      vector offset
+set UNFL_VEC,		0xcc			# unfl    vector offset
+set OPERR_VEC,		0xd0			# operr   vector offset
+set OVFL_VEC,		0xd4			# ovfl    vector offset
+set SNAN_VEC,		0xd8			# snan    vector offset
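+
+# Note (illustrative, not in the original source): these are offsets
+# into the exception vector table, i.e. vector number * 4 -- BSUN is
+# vector 48 (48*4 = 0xc0) through SNAN at vector 54 (54*4 = 0xd8).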
+
+###########################
+# SPecial CONDition FLaGs #
+###########################
+set ftrapcc_flg,	0x01			# flag bit: ftrapcc exception
+set fbsun_flg,		0x02			# flag bit: bsun exception
+set mia7_flg,		0x04			# flag bit: (a7)+ <ea>
+set mda7_flg,		0x08			# flag bit: -(a7) <ea>
+set fmovm_flg,		0x40			# flag bit: fmovm instruction
+set immed_flg,		0x80			# flag bit: &<data> <ea>
+
+set ftrapcc_bit,	0x0
+set fbsun_bit,		0x1
+set mia7_bit,		0x2
+set mda7_bit,		0x3
+set immed_bit,		0x7
+
+##################################
+# TRANSCENDENTAL "LAST-OP" FLAGS #
+##################################
+set FMUL_OP,		0x0			# fmul instr performed last
+set FDIV_OP,		0x1			# fdiv performed last
+set FADD_OP,		0x2			# fadd performed last
+set FMOV_OP,		0x3			# fmov performed last
+
+#############
+# CONSTANTS #
+#############
+T1:	long		0x40C62D38,0xD3D64634	# 16381 LOG2 LEAD
+T2:	long		0x3D6F90AE,0xB1E75CC7	# 16381 LOG2 TRAIL
+
+PI:	long		0x40000000,0xC90FDAA2,0x2168C235,0x00000000
+PIBY2:	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+TWOBYPI:
+	long		0x3FE45F30,0x6DC9C883
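+
+# Decoding example (illustrative, not in the original source): PI above
+# is an extended-precision constant -- sign/exponent word 0x4000
+# (unbiased exponent +1) and mantissa 0xC90FDAA22168C235, i.e.
+# 1.5707963... * 2 = pi. PIBY2 shares the mantissa with exponent word
+# 0x3fff (2^0), and TWOBYPI holds 2/pi as an IEEE double
+# (0x3FE45F306DC9C883).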
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
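+# Overview (illustrative, not in the original source): every monadic
+# entry below follows the same shape -- build a LOCAL_SIZE frame, save
+# d0-d1/a0-a1 and fp0/fp1, zero the FPCR, convert the argument to
+# extended precision in FP_SRC, classify it with "tag", then dispatch:
+# a NORM goes to the emulation core (ssin here), ZERO/INF/QNAN go to
+# small special-case handlers, and a DENORM goes to the "...d" core.
+#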
+	global		_fsins_
+_fsins_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L0_2s
+	bsr.l		ssin			# operand is a NORM
+	bra.b		_L0_6s
+_L0_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L0_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L0_6s
+_L0_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L0_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L0_6s
+_L0_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L0_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L0_6s
+_L0_5s:
+	bsr.l		ssind			# operand is a DENORM
+_L0_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
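+
+# Example call (illustrative sketch, not in the original source): the
+# argument is expected at 0x8(%a6), i.e. pushed on the stack just
+# before the branch; the extended-precision result returns in fp0.
+#
+#	fmov.s	%fp2,-(%sp)	# push sgl argument (4 bytes)
+#	bsr.l	_fsins_		# compute sin(x); result in fp0
+#	addq.l	&4,%sp		# pop the argument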
+
+	global		_fsind_
+_fsind_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L0_2d
+	bsr.l		ssin			# operand is a NORM
+	bra.b		_L0_6d
+_L0_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L0_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L0_6d
+_L0_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L0_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L0_6d
+_L0_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L0_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L0_6d
+_L0_5d:
+	bsr.l		ssind			# operand is a DENORM
+_L0_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fsinx_
+_fsinx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L0_2x
+	bsr.l		ssin			# operand is a NORM
+	bra.b		_L0_6x
+_L0_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L0_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L0_6x
+_L0_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L0_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L0_6x
+_L0_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L0_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L0_6x
+_L0_5x:
+	bsr.l		ssind			# operand is a DENORM
+_L0_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fcoss_
+_fcoss_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L1_2s
+	bsr.l		scos			# operand is a NORM
+	bra.b		_L1_6s
+_L1_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L1_3s			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L1_6s
+_L1_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L1_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L1_6s
+_L1_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L1_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L1_6s
+_L1_5s:
+	bsr.l		scosd			# operand is a DENORM
+_L1_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fcosd_
+_fcosd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L1_2d
+	bsr.l		scos			# operand is a NORM
+	bra.b		_L1_6d
+_L1_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L1_3d			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L1_6d
+_L1_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L1_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L1_6d
+_L1_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L1_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L1_6d
+_L1_5d:
+	bsr.l		scosd			# operand is a DENORM
+_L1_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fcosx_
+_fcosx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L1_2x
+	bsr.l		scos			# operand is a NORM
+	bra.b		_L1_6x
+_L1_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L1_3x			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L1_6x
+_L1_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L1_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L1_6x
+_L1_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L1_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L1_6x
+_L1_5x:
+	bsr.l		scosd			# operand is a DENORM
+_L1_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fsinhs_
+_fsinhs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L2_2s
+	bsr.l		ssinh			# operand is a NORM
+	bra.b		_L2_6s
+_L2_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L2_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L2_6s
+_L2_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L2_4s			# no
+	bsr.l		src_inf			# yes
+	bra.b		_L2_6s
+_L2_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L2_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L2_6s
+_L2_5s:
+	bsr.l		ssinhd			# operand is a DENORM
+_L2_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fsinhd_
+_fsinhd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L2_2d
+	bsr.l		ssinh			# operand is a NORM
+	bra.b		_L2_6d
+_L2_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L2_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L2_6d
+_L2_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L2_4d			# no
+	bsr.l		src_inf			# yes
+	bra.b		_L2_6d
+_L2_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L2_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L2_6d
+_L2_5d:
+	bsr.l		ssinhd			# operand is a DENORM
+_L2_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fsinhx_
+_fsinhx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L2_2x
+	bsr.l		ssinh			# operand is a NORM
+	bra.b		_L2_6x
+_L2_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L2_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L2_6x
+_L2_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L2_4x			# no
+	bsr.l		src_inf			# yes
+	bra.b		_L2_6x
+_L2_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L2_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L2_6x
+_L2_5x:
+	bsr.l		ssinhd			# operand is a DENORM
+_L2_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_flognp1s_
+_flognp1s_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L3_2s
+	bsr.l		slognp1			# operand is a NORM
+	bra.b		_L3_6s
+_L3_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L3_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L3_6s
+_L3_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L3_4s			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L3_6s
+_L3_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L3_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L3_6s
+_L3_5s:
+	bsr.l		slognp1d			# operand is a DENORM
+_L3_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flognp1d_
+_flognp1d_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L3_2d
+	bsr.l		slognp1			# operand is a NORM
+	bra.b		_L3_6d
+_L3_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L3_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L3_6d
+_L3_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L3_4d			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L3_6d
+_L3_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L3_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L3_6d
+_L3_5d:
+	bsr.l		slognp1d			# operand is a DENORM
+_L3_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flognp1x_
+_flognp1x_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L3_2x
+	bsr.l		slognp1			# operand is a NORM
+	bra.b		_L3_6x
+_L3_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L3_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L3_6x
+_L3_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L3_4x			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L3_6x
+_L3_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L3_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L3_6x
+_L3_5x:
+	bsr.l		slognp1d			# operand is a DENORM
+_L3_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fetoxm1s_
+_fetoxm1s_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L4_2s
+	bsr.l		setoxm1			# operand is a NORM
+	bra.b		_L4_6s
+_L4_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L4_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L4_6s
+_L4_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L4_4s			# no
+	bsr.l		setoxm1i			# yes
+	bra.b		_L4_6s
+_L4_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L4_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L4_6s
+_L4_5s:
+	bsr.l		setoxm1d			# operand is a DENORM
+_L4_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fetoxm1d_
+_fetoxm1d_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L4_2d
+	bsr.l		setoxm1			# operand is a NORM
+	bra.b		_L4_6d
+_L4_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L4_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L4_6d
+_L4_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L4_4d			# no
+	bsr.l		setoxm1i			# yes
+	bra.b		_L4_6d
+_L4_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L4_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L4_6d
+_L4_5d:
+	bsr.l		setoxm1d			# operand is a DENORM
+_L4_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fetoxm1x_
+_fetoxm1x_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L4_2x
+	bsr.l		setoxm1			# operand is a NORM
+	bra.b		_L4_6x
+_L4_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L4_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L4_6x
+_L4_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L4_4x			# no
+	bsr.l		setoxm1i			# yes
+	bra.b		_L4_6x
+_L4_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L4_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L4_6x
+_L4_5x:
+	bsr.l		setoxm1d			# operand is a DENORM
+_L4_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_ftanhs_
+_ftanhs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L5_2s
+	bsr.l		stanh			# operand is a NORM
+	bra.b		_L5_6s
+_L5_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L5_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L5_6s
+_L5_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L5_4s			# no
+	bsr.l		src_one			# yes
+	bra.b		_L5_6s
+_L5_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L5_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L5_6s
+_L5_5s:
+	bsr.l		stanhd			# operand is a DENORM
+_L5_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftanhd_
+_ftanhd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L5_2d
+	bsr.l		stanh			# operand is a NORM
+	bra.b		_L5_6d
+_L5_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L5_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L5_6d
+_L5_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L5_4d			# no
+	bsr.l		src_one			# yes
+	bra.b		_L5_6d
+_L5_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L5_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L5_6d
+_L5_5d:
+	bsr.l		stanhd			# operand is a DENORM
+_L5_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftanhx_
+_ftanhx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L5_2x
+	bsr.l		stanh			# operand is a NORM
+	bra.b		_L5_6x
+_L5_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L5_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L5_6x
+_L5_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L5_4x			# no
+	bsr.l		src_one			# yes
+	bra.b		_L5_6x
+_L5_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L5_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L5_6x
+_L5_5x:
+	bsr.l		stanhd			# operand is a DENORM
+_L5_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fatans_
+_fatans_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L6_2s
+	bsr.l		satan			# operand is a NORM
+	bra.b		_L6_6s
+_L6_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L6_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L6_6s
+_L6_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L6_4s			# no
+	bsr.l		spi_2			# yes
+	bra.b		_L6_6s
+_L6_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L6_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L6_6s
+_L6_5s:
+	bsr.l		satand			# operand is a DENORM
+_L6_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fatand_
+_fatand_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L6_2d
+	bsr.l		satan			# operand is a NORM
+	bra.b		_L6_6d
+_L6_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L6_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L6_6d
+_L6_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L6_4d			# no
+	bsr.l		spi_2			# yes
+	bra.b		_L6_6d
+_L6_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L6_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L6_6d
+_L6_5d:
+	bsr.l		satand			# operand is a DENORM
+_L6_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fatanx_
+_fatanx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L6_2x
+	bsr.l		satan			# operand is a NORM
+	bra.b		_L6_6x
+_L6_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L6_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L6_6x
+_L6_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L6_4x			# no
+	bsr.l		spi_2			# yes
+	bra.b		_L6_6x
+_L6_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L6_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L6_6x
+_L6_5x:
+	bsr.l		satand			# operand is a DENORM
+_L6_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fasins_
+_fasins_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L7_2s
+	bsr.l		sasin			# operand is a NORM
+	bra.b		_L7_6s
+_L7_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L7_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L7_6s
+_L7_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L7_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L7_6s
+_L7_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L7_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L7_6s
+_L7_5s:
+	bsr.l		sasind			# operand is a DENORM
+_L7_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fasind_
+_fasind_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L7_2d
+	bsr.l		sasin			# operand is a NORM
+	bra.b		_L7_6d
+_L7_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L7_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L7_6d
+_L7_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L7_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L7_6d
+_L7_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L7_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L7_6d
+_L7_5d:
+	bsr.l		sasind			# operand is a DENORM
+_L7_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fasinx_
+_fasinx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L7_2x
+	bsr.l		sasin			# operand is a NORM
+	bra.b		_L7_6x
+_L7_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L7_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L7_6x
+_L7_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L7_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L7_6x
+_L7_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L7_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L7_6x
+_L7_5x:
+	bsr.l		sasind			# operand is a DENORM
+_L7_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fatanhs_
+_fatanhs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L8_2s
+	bsr.l		satanh			# operand is a NORM
+	bra.b		_L8_6s
+_L8_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L8_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L8_6s
+_L8_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L8_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L8_6s
+_L8_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L8_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L8_6s
+_L8_5s:
+	bsr.l		satanhd			# operand is a DENORM
+_L8_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fatanhd_
+_fatanhd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L8_2d
+	bsr.l		satanh			# operand is a NORM
+	bra.b		_L8_6d
+_L8_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L8_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L8_6d
+_L8_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L8_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L8_6d
+_L8_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L8_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L8_6d
+_L8_5d:
+	bsr.l		satanhd			# operand is a DENORM
+_L8_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fatanhx_
+_fatanhx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L8_2x
+	bsr.l		satanh			# operand is a NORM
+	bra.b		_L8_6x
+_L8_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L8_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L8_6x
+_L8_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L8_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L8_6x
+_L8_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L8_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L8_6x
+_L8_5x:
+	bsr.l		satanhd			# operand is a DENORM
+_L8_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_ftans_
+_ftans_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L9_2s
+	bsr.l		stan			# operand is a NORM
+	bra.b		_L9_6s
+_L9_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L9_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L9_6s
+_L9_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L9_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L9_6s
+_L9_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L9_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L9_6s
+_L9_5s:
+	bsr.l		stand			# operand is a DENORM
+_L9_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftand_
+_ftand_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L9_2d
+	bsr.l		stan			# operand is a NORM
+	bra.b		_L9_6d
+_L9_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L9_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L9_6d
+_L9_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L9_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L9_6d
+_L9_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L9_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L9_6d
+_L9_5d:
+	bsr.l		stand			# operand is a DENORM
+_L9_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftanx_
+_ftanx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L9_2x
+	bsr.l		stan			# operand is a NORM
+	bra.b		_L9_6x
+_L9_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L9_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L9_6x
+_L9_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L9_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L9_6x
+_L9_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L9_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L9_6x
+_L9_5x:
+	bsr.l		stand			# operand is a DENORM
+_L9_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fetoxs_
+_fetoxs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L10_2s
+	bsr.l		setox			# operand is a NORM
+	bra.b		_L10_6s
+_L10_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L10_3s			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L10_6s
+_L10_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L10_4s			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L10_6s
+_L10_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L10_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L10_6s
+_L10_5s:
+	bsr.l		setoxd			# operand is a DENORM
+_L10_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fetoxd_
+_fetoxd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L10_2d
+	bsr.l		setox			# operand is a NORM
+	bra.b		_L10_6d
+_L10_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L10_3d			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L10_6d
+_L10_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L10_4d			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L10_6d
+_L10_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L10_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L10_6d
+_L10_5d:
+	bsr.l		setoxd			# operand is a DENORM
+_L10_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fetoxx_
+_fetoxx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L10_2x
+	bsr.l		setox			# operand is a NORM
+	bra.b		_L10_6x
+_L10_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L10_3x			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L10_6x
+_L10_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L10_4x			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L10_6x
+_L10_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L10_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L10_6x
+_L10_5x:
+	bsr.l		setoxd			# operand is a DENORM
+_L10_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_ftwotoxs_
+_ftwotoxs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L11_2s
+	bsr.l		stwotox			# operand is a NORM
+	bra.b		_L11_6s
+_L11_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L11_3s			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L11_6s
+_L11_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L11_4s			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L11_6s
+_L11_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L11_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L11_6s
+_L11_5s:
+	bsr.l		stwotoxd			# operand is a DENORM
+_L11_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftwotoxd_
+_ftwotoxd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L11_2d
+	bsr.l		stwotox			# operand is a NORM
+	bra.b		_L11_6d
+_L11_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L11_3d			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L11_6d
+_L11_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L11_4d			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L11_6d
+_L11_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L11_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L11_6d
+_L11_5d:
+	bsr.l		stwotoxd			# operand is a DENORM
+_L11_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftwotoxx_
+_ftwotoxx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L11_2x
+	bsr.l		stwotox			# operand is a NORM
+	bra.b		_L11_6x
+_L11_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L11_3x			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L11_6x
+_L11_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L11_4x			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L11_6x
+_L11_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L11_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L11_6x
+_L11_5x:
+	bsr.l		stwotoxd			# operand is a DENORM
+_L11_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_ftentoxs_
+_ftentoxs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L12_2s
+	bsr.l		stentox			# operand is a NORM
+	bra.b		_L12_6s
+_L12_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L12_3s			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L12_6s
+_L12_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L12_4s			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L12_6s
+_L12_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L12_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L12_6s
+_L12_5s:
+	bsr.l		stentoxd			# operand is a DENORM
+_L12_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftentoxd_
+_ftentoxd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L12_2d
+	bsr.l		stentox			# operand is a NORM
+	bra.b		_L12_6d
+_L12_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L12_3d			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L12_6d
+_L12_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L12_4d			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L12_6d
+_L12_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L12_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L12_6d
+_L12_5d:
+	bsr.l		stentoxd			# operand is a DENORM
+_L12_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_ftentoxx_
+_ftentoxx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L12_2x
+	bsr.l		stentox			# operand is a NORM
+	bra.b		_L12_6x
+_L12_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L12_3x			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L12_6x
+_L12_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L12_4x			# no
+	bsr.l		szr_inf			# yes
+	bra.b		_L12_6x
+_L12_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L12_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L12_6x
+_L12_5x:
+	bsr.l		stentoxd			# operand is a DENORM
+_L12_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
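+#
+#	The _flogn*_ entries reuse the monadic template with different
+#	special-case handlers: a ZERO operand dispatches to t_dz2
+#	(ln(0) is a divide-by-zero-class case) and an INF operand to
+#	sopr_inf, so only NORM/DENORM inputs reach the slogn/slognd
+#	kernels. These readings of the handler names are our inference,
+#	not something documented in this file.
+#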
+	global		_flogns_
+_flogns_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L13_2s
+	bsr.l		slogn			# operand is a NORM
+	bra.b		_L13_6s
+_L13_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L13_3s			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L13_6s
+_L13_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L13_4s			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L13_6s
+_L13_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L13_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L13_6s
+_L13_5s:
+	bsr.l		slognd			# operand is a DENORM
+_L13_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flognd_
+_flognd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L13_2d
+	bsr.l		slogn			# operand is a NORM
+	bra.b		_L13_6d
+_L13_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L13_3d			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L13_6d
+_L13_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L13_4d			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L13_6d
+_L13_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L13_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L13_6d
+_L13_5d:
+	bsr.l		slognd			# operand is a DENORM
+_L13_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flognx_
+_flognx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L13_2x
+	bsr.l		slogn			# operand is a NORM
+	bra.b		_L13_6x
+_L13_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L13_3x			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L13_6x
+_L13_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L13_4x			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L13_6x
+_L13_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L13_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L13_6x
+_L13_5x:
+	bsr.l		slognd			# operand is a DENORM
+_L13_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_flog10s_
+_flog10s_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L14_2s
+	bsr.l		slog10			# operand is a NORM
+	bra.b		_L14_6s
+_L14_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L14_3s			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L14_6s
+_L14_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L14_4s			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L14_6s
+_L14_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L14_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L14_6s
+_L14_5s:
+	bsr.l		slog10d			# operand is a DENORM
+_L14_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flog10d_
+_flog10d_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L14_2d
+	bsr.l		slog10			# operand is a NORM
+	bra.b		_L14_6d
+_L14_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L14_3d			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L14_6d
+_L14_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L14_4d			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L14_6d
+_L14_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L14_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L14_6d
+_L14_5d:
+	bsr.l		slog10d			# operand is a DENORM
+_L14_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flog10x_
+_flog10x_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L14_2x
+	bsr.l		slog10			# operand is a NORM
+	bra.b		_L14_6x
+_L14_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L14_3x			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L14_6x
+_L14_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L14_4x			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L14_6x
+_L14_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L14_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L14_6x
+_L14_5x:
+	bsr.l		slog10d			# operand is a DENORM
+_L14_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_flog2s_
+_flog2s_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L15_2s
+	bsr.l		slog2			# operand is a NORM
+	bra.b		_L15_6s
+_L15_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L15_3s			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L15_6s
+_L15_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L15_4s			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L15_6s
+_L15_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L15_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L15_6s
+_L15_5s:
+	bsr.l		slog2d			# operand is a DENORM
+_L15_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flog2d_
+_flog2d_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L15_2d
+	bsr.l		slog2			# operand is a NORM
+	bra.b		_L15_6d
+_L15_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L15_3d			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L15_6d
+_L15_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L15_4d			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L15_6d
+_L15_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L15_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L15_6d
+_L15_5d:
+	bsr.l		slog2d			# operand is a DENORM
+_L15_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_flog2x_
+_flog2x_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L15_2x
+	bsr.l		slog2			# operand is a NORM
+	bra.b		_L15_6x
+_L15_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L15_3x			# no
+	bsr.l		t_dz2			# yes
+	bra.b		_L15_6x
+_L15_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L15_4x			# no
+	bsr.l		sopr_inf			# yes
+	bra.b		_L15_6x
+_L15_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L15_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L15_6x
+_L15_5x:
+	bsr.l		slog2d			# operand is a DENORM
+_L15_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
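+#
+#	For the _fcosh*_ entries the special cases fall straight out of
+#	the mathematics: a ZERO operand dispatches to ld_pone since
+#	cosh(0) = 1, and an INF operand to ld_pinf since cosh(+/-inf) =
+#	+inf. The handler-name readings are our inference.
+#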
+	global		_fcoshs_
+_fcoshs_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L16_2s
+	bsr.l		scosh			# operand is a NORM
+	bra.b		_L16_6s
+_L16_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L16_3s			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L16_6s
+_L16_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L16_4s			# no
+	bsr.l		ld_pinf			# yes
+	bra.b		_L16_6s
+_L16_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L16_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L16_6s
+_L16_5s:
+	bsr.l		scoshd			# operand is a DENORM
+_L16_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fcoshd_
+_fcoshd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L16_2d
+	bsr.l		scosh			# operand is a NORM
+	bra.b		_L16_6d
+_L16_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L16_3d			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L16_6d
+_L16_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L16_4d			# no
+	bsr.l		ld_pinf			# yes
+	bra.b		_L16_6d
+_L16_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L16_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L16_6d
+_L16_5d:
+	bsr.l		scoshd			# operand is a DENORM
+_L16_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fcoshx_
+_fcoshx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L16_2x
+	bsr.l		scosh			# operand is a NORM
+	bra.b		_L16_6x
+_L16_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L16_3x			# no
+	bsr.l		ld_pone			# yes
+	bra.b		_L16_6x
+_L16_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L16_4x			# no
+	bsr.l		ld_pinf			# yes
+	bra.b		_L16_6x
+_L16_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L16_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L16_6x
+_L16_5x:
+	bsr.l		scoshd			# operand is a DENORM
+_L16_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
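+#
+#	For the _facos*_ entries: a ZERO operand dispatches to ld_ppi2
+#	since acos(0) = pi/2, while an INF operand dispatches to
+#	t_operr because acos is only defined on [-1,+1]. Again, the
+#	handler-name readings are our inference.
+#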
+	global		_facoss_
+_facoss_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L17_2s
+	bsr.l		sacos			# operand is a NORM
+	bra.b		_L17_6s
+_L17_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L17_3s			# no
+	bsr.l		ld_ppi2			# yes
+	bra.b		_L17_6s
+_L17_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L17_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L17_6s
+_L17_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L17_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L17_6s
+_L17_5s:
+	bsr.l		sacosd			# operand is a DENORM
+_L17_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_facosd_
+_facosd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L17_2d
+	bsr.l		sacos			# operand is a NORM
+	bra.b		_L17_6d
+_L17_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L17_3d			# no
+	bsr.l		ld_ppi2			# yes
+	bra.b		_L17_6d
+_L17_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L17_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L17_6d
+_L17_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L17_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L17_6d
+_L17_5d:
+	bsr.l		sacosd			# operand is a DENORM
+_L17_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_facosx_
+_facosx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L17_2x
+	bsr.l		sacos			# operand is a NORM
+	bra.b		_L17_6x
+_L17_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L17_3x			# no
+	bsr.l		ld_ppi2			# yes
+	bra.b		_L17_6x
+_L17_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L17_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L17_6x
+_L17_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L17_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L17_6x
+_L17_5x:
+	bsr.l		sacosd			# operand is a DENORM
+_L17_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
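+#
+#	The _fgetexp*_ entries (and the _fgetman*_ entries further
+#	below) extract the exponent or mantissa of the operand. A ZERO
+#	operand dispatches to src_zero (the answer is simply zero) and
+#	an INF operand to t_operr, which matches the 68881/68882
+#	FGETEXP/FGETMAN convention of signalling an operand error for
+#	infinities.
+#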
+	global		_fgetexps_
+_fgetexps_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L18_2s
+	bsr.l		sgetexp			# operand is a NORM
+	bra.b		_L18_6s
+_L18_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L18_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L18_6s
+_L18_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L18_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L18_6s
+_L18_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L18_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L18_6s
+_L18_5s:
+	bsr.l		sgetexpd			# operand is a DENORM
+_L18_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fgetexpd_
+_fgetexpd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L18_2d
+	bsr.l		sgetexp			# operand is a NORM
+	bra.b		_L18_6d
+_L18_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L18_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L18_6d
+_L18_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L18_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L18_6d
+_L18_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L18_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L18_6d
+_L18_5d:
+	bsr.l		sgetexpd			# operand is a DENORM
+_L18_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fgetexpx_
+_fgetexpx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L18_2x
+	bsr.l		sgetexp			# operand is a NORM
+	bra.b		_L18_6x
+_L18_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L18_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L18_6x
+_L18_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L18_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L18_6x
+_L18_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L18_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L18_6x
+_L18_5x:
+	bsr.l		sgetexpd			# operand is a DENORM
+_L18_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
+	global		_fgetmans_
+_fgetmans_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L19_2s
+	bsr.l		sgetman			# operand is a NORM
+	bra.b		_L19_6s
+_L19_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L19_3s			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L19_6s
+_L19_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L19_4s			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L19_6s
+_L19_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L19_5s			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L19_6s
+_L19_5s:
+	bsr.l		sgetmand			# operand is a DENORM
+_L19_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fgetmand_
+_fgetmand_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L19_2d
+	bsr.l		sgetman			# operand is a NORM
+	bra.b		_L19_6d
+_L19_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L19_3d			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L19_6d
+_L19_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L19_4d			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L19_6d
+_L19_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L19_5d			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L19_6d
+_L19_5d:
+	bsr.l		sgetmand			# operand is a DENORM
+_L19_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fgetmanx_
+_fgetmanx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L19_2x
+	bsr.l		sgetman			# operand is a NORM
+	bra.b		_L19_6x
+_L19_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L19_3x			# no
+	bsr.l		src_zero			# yes
+	bra.b		_L19_6x
+_L19_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L19_4x			# no
+	bsr.l		t_operr			# yes
+	bra.b		_L19_6x
+_L19_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L19_5x			# no
+	bsr.l		src_qnan			# yes
+	bra.b		_L19_6x
+_L19_5x:
+	bsr.l		sgetmand			# operand is a DENORM
+_L19_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# MONADIC TEMPLATE							#
+#########################################################################
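+#
+#	Unlike the other monadic entries, the sincos routines return
+#	two values. Per the ssincos() header further below, the kernel
+#	produces sin(X) and cos(X) in fp0/fp1, and the push/pop
+#	sequence in the epilogue exchanges the two registers before
+#	returning. A hypothetical caller sketch (not part of this
+#	source; the argument and cleanup convention is inferred from
+#	the 8(%a6) offset the template reads):
+#
+#		mov.l	arg,-(%sp)	# push 4-byte single argument
+#		bsr.l	_fsincoss_	# on return, both results are
+#		addq.l	&4,%sp		#  live in fp0 and fp1
+#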
+	global		_fsincoss_
+_fsincoss_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L20_2s
+	bsr.l		ssincos			# operand is a NORM
+	bra.b		_L20_6s
+_L20_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L20_3s			# no
+	bsr.l		ssincosz			# yes
+	bra.b		_L20_6s
+_L20_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L20_4s			# no
+	bsr.l		ssincosi			# yes
+	bra.b		_L20_6s
+_L20_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L20_5s			# no
+	bsr.l		ssincosqnan			# yes
+	bra.b		_L20_6s
+_L20_5s:
+	bsr.l		ssincosd			# operand is a DENORM
+_L20_6s:
+
+#
+#	Results are now in FP0 and FP1
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		&0x03,-(%sp)		# store off fp0/fp1
+	fmovm.x		(%sp)+,&0x40		# fp0 now in fp1
+	fmovm.x		(%sp)+,&0x80		# fp1 now in fp0
+	unlk		%a6
+	rts
+
+	global		_fsincosd_
+_fsincosd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl input
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	mov.b		%d1,STAG(%a6)
+	tst.b		%d1
+	bne.b		_L20_2d
+	bsr.l		ssincos			# operand is a NORM
+	bra.b		_L20_6d
+_L20_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L20_3d			# no
+	bsr.l		ssincosz			# yes
+	bra.b		_L20_6d
+_L20_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L20_4d			# no
+	bsr.l		ssincosi			# yes
+	bra.b		_L20_6d
+_L20_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L20_5d			# no
+	bsr.l		ssincosqnan			# yes
+	bra.b		_L20_6d
+_L20_5d:
+	bsr.l		ssincosd			# operand is a DENORM
+_L20_6d:
+
+#
+#	Results are now in FP0 and FP1
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		&0x03,-(%sp)		# store off fp0/fp1
+	fmovm.x		(%sp)+,&0x40		# fp0 now in fp1
+	fmovm.x		(%sp)+,&0x80		# fp1 now in fp0
+	unlk		%a6
+	rts
+
+	global		_fsincosx_
+_fsincosx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext input
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.b		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	tst.b		%d1
+	bne.b		_L20_2x
+	bsr.l		ssincos			# operand is a NORM
+	bra.b		_L20_6x
+_L20_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L20_3x			# no
+	bsr.l		ssincosz			# yes
+	bra.b		_L20_6x
+_L20_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L20_4x			# no
+	bsr.l		ssincosi			# yes
+	bra.b		_L20_6x
+_L20_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L20_5x			# no
+	bsr.l		ssincosqnan			# yes
+	bra.b		_L20_6x
+_L20_5x:
+	bsr.l		ssincosd			# operand is a DENORM
+_L20_6x:
+
+#
+#	Results are now in FP0 and FP1
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		&0x03,-(%sp)		# store off fp0/fp1
+	fmovm.x		(%sp)+,&0x40		# fp0 now in fp1
+	fmovm.x		(%sp)+,&0x80		# fp1 now in fp0
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# DYADIC TEMPLATE							#
+#########################################################################
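+#
+#	The dyadic template differs from the monadic one only in
+#	loading and tagging two operands -- the destination from
+#	8(%a6) and the source from 0xc/0x10/0x14(%a6) for single/
+#	double/extended -- and in dispatching on the source tag with
+#	pointers to both operands passed in a0/a1. Implied C-callable
+#	shape for the IEEE-remainder entries below (an assumption
+#	inferred from those offsets):
+#
+#		float       _frems_(float dst, float src);
+#		double      _fremd_(double dst, double src);
+#		long double _fremx_(long double dst, long double src);
+#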
+	global		_frems_
+_frems_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input arguments
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl dst
+	fmov.x		%fp0,FP_DST(%a6)
+	lea		FP_DST(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	fmov.s		0xc(%a6),%fp0		# load sgl src
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L21_2s
+	bsr.l		srem_snorm			# operand is a NORM
+	bra.b		_L21_6s
+_L21_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L21_3s			# no
+	bsr.l		srem_szero			# yes
+	bra.b		_L21_6s
+_L21_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L21_4s			# no
+	bsr.l		srem_sinf			# yes
+	bra.b		_L21_6s
+_L21_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L21_5s			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L21_6s
+_L21_5s:
+	bsr.l		srem_sdnrm			# operand is a DENORM
+_L21_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fremd_
+_fremd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input arguments
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl dst
+	fmov.x		%fp0,FP_DST(%a6)
+	lea		FP_DST(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	fmov.d		0x10(%a6),%fp0		# load dbl src
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L21_2d
+	bsr.l		srem_snorm			# operand is a NORM
+	bra.b		_L21_6d
+_L21_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L21_3d			# no
+	bsr.l		srem_szero			# yes
+	bra.b		_L21_6d
+_L21_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L21_4d			# no
+	bsr.l		srem_sinf			# yes
+	bra.b		_L21_6d
+_L21_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L21_5d			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L21_6d
+_L21_5d:
+	bsr.l		srem_sdnrm			# operand is a DENORM
+_L21_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fremx_
+_fremx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input arguments
+#
+	lea		FP_DST(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext dst
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x14+0x0(%a6),0x0(%a0)	# load ext src
+	mov.l		0x14+0x4(%a6),0x4(%a0)
+	mov.l		0x14+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L21_2x
+	bsr.l		srem_snorm			# operand is a NORM
+	bra.b		_L21_6x
+_L21_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L21_3x			# no
+	bsr.l		srem_szero			# yes
+	bra.b		_L21_6x
+_L21_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L21_4x			# no
+	bsr.l		srem_sinf			# yes
+	bra.b		_L21_6x
+_L21_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L21_5x			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L21_6x
+_L21_5x:
+	bsr.l		srem_sdnrm			# operand is a DENORM
+_L21_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# DYADIC TEMPLATE							#
+#########################################################################
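+#
+#	The _fmod*_ entries are the same dyadic template wired to the
+#	smod_* handlers. The difference from _frem*_ mirrors the
+#	68881/68882 FMOD/FREM pair: FMOD forms the remainder using a
+#	truncated (round-to-zero) quotient, FREM using a
+#	round-to-nearest quotient.
+#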
+	global		_fmods_
+_fmods_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input arguments
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl dst
+	fmov.x		%fp0,FP_DST(%a6)
+	lea		FP_DST(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	fmov.s		0xc(%a6),%fp0		# load sgl src
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L22_2s
+	bsr.l		smod_snorm			# operand is a NORM
+	bra.b		_L22_6s
+_L22_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L22_3s			# no
+	bsr.l		smod_szero			# yes
+	bra.b		_L22_6s
+_L22_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L22_4s			# no
+	bsr.l		smod_sinf			# yes
+	bra.b		_L22_6s
+_L22_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L22_5s			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L22_6s
+_L22_5s:
+	bsr.l		smod_sdnrm			# operand is a DENORM
+_L22_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fmodd_
+_fmodd_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input arguments
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl dst
+	fmov.x		%fp0,FP_DST(%a6)
+	lea		FP_DST(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	fmov.d		0x10(%a6),%fp0		# load dbl src
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L22_2d
+	bsr.l		smod_snorm			# operand is a NORM
+	bra.b		_L22_6d
+_L22_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L22_3d			# no
+	bsr.l		smod_szero			# yes
+	bra.b		_L22_6d
+_L22_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L22_4d			# no
+	bsr.l		smod_sinf			# yes
+	bra.b		_L22_6d
+_L22_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L22_5d			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L22_6d
+_L22_5d:
+	bsr.l		smod_sdnrm			# operand is a DENORM
+_L22_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fmodx_
+_fmodx_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input arguments
+#
+	lea		FP_DST(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext dst
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x14+0x0(%a6),0x0(%a0)	# load ext src
+	mov.l		0x14+0x4(%a6),0x4(%a0)
+	mov.l		0x14+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L22_2x
+	bsr.l		smod_snorm			# operand is a NORM
+	bra.b		_L22_6x
+_L22_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L22_3x			# no
+	bsr.l		smod_szero			# yes
+	bra.b		_L22_6x
+_L22_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L22_4x			# no
+	bsr.l		smod_sinf			# yes
+	bra.b		_L22_6x
+_L22_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L22_5x			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L22_6x
+_L22_5x:
+	bsr.l		smod_sdnrm			# operand is a DENORM
+_L22_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+
+#########################################################################
+# DYADIC TEMPLATE							#
+#########################################################################
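+#
+#	The _fscale*_ entries wire the dyadic template to the sscale_*
+#	handlers; like the 68881/68882 FSCALE instruction, this scales
+#	the destination by 2^(integer part of the source), i.e. it
+#	adds the truncated source to the destination's exponent.
+#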
+	global		_fscales_
+_fscales_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input arguments
+#
+	fmov.s		0x8(%a6),%fp0		# load sgl dst
+	fmov.x		%fp0,FP_DST(%a6)
+	lea		FP_DST(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	fmov.s		0xc(%a6),%fp0		# load sgl src
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L23_2s
+	bsr.l		sscale_snorm			# operand is a NORM
+	bra.b		_L23_6s
+_L23_2s:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L23_3s			# no
+	bsr.l		sscale_szero			# yes
+	bra.b		_L23_6s
+_L23_3s:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L23_4s			# no
+	bsr.l		sscale_sinf			# yes
+	bra.b		_L23_6s
+_L23_4s:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L23_5s			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L23_6s
+_L23_5s:
+	bsr.l		sscale_sdnrm			# operand is a DENORM
+_L23_6s:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fscaled_
+_fscaled_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input arguments
+#
+	fmov.d		0x8(%a6),%fp0		# load dbl dst
+	fmov.x		%fp0,FP_DST(%a6)
+	lea		FP_DST(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	fmov.d		0x10(%a6),%fp0		# load dbl src
+	fmov.x		%fp0,FP_SRC(%a6)
+	lea		FP_SRC(%a6),%a0
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L23_2d
+	bsr.l		sscale_snorm			# operand is a NORM
+	bra.b		_L23_6d
+_L23_2d:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L23_3d			# no
+	bsr.l		sscale_szero			# yes
+	bra.b		_L23_6d
+_L23_3d:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L23_4d			# no
+	bsr.l		sscale_sinf			# yes
+	bra.b		_L23_6d
+_L23_4d:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L23_5d			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L23_6d
+_L23_5d:
+	bsr.l		sscale_sdnrm			# operand is a DENORM
+_L23_6d:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
+	global		_fscalex_
+_fscalex_:
+	link		%a6,&-LOCAL_SIZE
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FP0(%a6)	# save fp0/fp1
+
+	fmov.l		&0x0,%fpcr		# zero FPCR
+
+#
+#	copy, convert, and tag input argument
+#
+	lea		FP_DST(%a6),%a0
+	mov.l		0x8+0x0(%a6),0x0(%a0)	# load ext dst
+	mov.l		0x8+0x4(%a6),0x4(%a0)
+	mov.l		0x8+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,DTAG(%a6)
+
+	lea		FP_SRC(%a6),%a0
+	mov.l		0x14+0x0(%a6),0x0(%a0)	# load ext src
+	mov.l		0x14+0x4(%a6),0x4(%a0)
+	mov.l		0x14+0x8(%a6),0x8(%a0)
+	bsr.l		tag			# fetch operand type
+	mov.b		%d0,STAG(%a6)
+	mov.l		%d0,%d1
+
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd mode,prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	lea		FP_DST(%a6),%a1		# pass ptr to dst
+
+	tst.b		%d1
+	bne.b		_L23_2x
+	bsr.l		sscale_snorm			# operand is a NORM
+	bra.b		_L23_6x
+_L23_2x:
+	cmpi.b		%d1,&ZERO		# is operand a ZERO?
+	bne.b		_L23_3x			# no
+	bsr.l		sscale_szero			# yes
+	bra.b		_L23_6x
+_L23_3x:
+	cmpi.b		%d1,&INF		# is operand an INF?
+	bne.b		_L23_4x			# no
+	bsr.l		sscale_sinf			# yes
+	bra.b		_L23_6x
+_L23_4x:
+	cmpi.b		%d1,&QNAN		# is operand a QNAN?
+	bne.b		_L23_5x			# no
+	bsr.l		sop_sqnan			# yes
+	bra.b		_L23_6x
+_L23_5x:
+	bsr.l		sscale_sdnrm			# operand is a DENORM
+_L23_6x:
+
+#
+#	Result is now in FP0
+#
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+	fmovm.x		EXC_FP1(%a6),&0x40	# restore fp1
+	unlk		%a6
+	rts
+
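+# The templates above all share one shape: save the caller's registers
+# and FPCR, convert the stacked argument(s) to extended precision,
+# classify each operand with tag(), then dispatch on the source tag.
+# As a hedged C illustration only (not part of the Motorola sources;
+# the helper names simply mirror the assembly entry points):
+#
+#	enum tag { NORM, ZERO, INF, QNAN, DENORM };
+#
+#	/* assumed helpers, mirroring the assembly routines */
+#	enum tag    tag(long double *x);
+#	long double sscale_snorm(long double *src, long double *dst);
+#	long double sscale_szero(long double *src, long double *dst);
+#	long double sscale_sinf (long double *src, long double *dst);
+#	long double sop_sqnan   (long double *src, long double *dst);
+#	long double sscale_sdnrm(long double *src, long double *dst);
+#
+#	long double fscale_tmpl(long double dst, long double src)
+#	{
+#		switch (tag(&src)) {
+#		case NORM:  return sscale_snorm(&src, &dst);
+#		case ZERO:  return sscale_szero(&src, &dst);
+#		case INF:   return sscale_sinf (&src, &dst);
+#		case QNAN:  return sop_sqnan   (&src, &dst);
+#		default:    return sscale_sdnrm(&src, &dst); /* DENORM */
+#		}
+#	}
+#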
+
+#########################################################################
+# ssin():     computes the sine of a normalized input			#
+# ssind():    computes the sine of a denormalized input			#
+# scos():     computes the cosine of a normalized input			#
+# scosd():    computes the cosine of a denormalized input		#
+# ssincos():  computes the sine and cosine of a normalized input	#
+# ssincosd(): computes the sine and cosine of a denormalized input	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = sin(X) or cos(X)						#
+#									#
+#    For ssincos(X):							#
+#	fp0 = sin(X)							#
+#	fp1 = cos(X)							#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 1 ulp in 64 significant bits, i.e.	#
+#	within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	SIN and COS:							#
+#	1. If SIN is invoked, set AdjN := 0; otherwise, set AdjN := 1.	#
+#									#
+#	2. If |X| >= 15Pi or |X| < 2**(-40), go to 7.			#
+#									#
+#	3. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let	#
+#		k = N mod 4, so in particular, k = 0,1,2,or 3.		#
+#		Overwrite k by k := k + AdjN.				#
+#									#
+#	4. If k is even, go to 6.					#
+#									#
+#	5. (k is odd) Set j := (k-1)/2, sgn := (-1)**j.			#
+#		Return sgn*cos(r) where cos(r) is approximated by an	#
+#		even polynomial in r, 1 + r*r*(B1+s*(B2+ ... + s*B8)),	#
+#		s = r*r.						#
+#		Exit.							#
+#									#
+#	6. (k is even) Set j := k/2, sgn := (-1)**j. Return sgn*sin(r)	#
+#		where sin(r) is approximated by an odd polynomial in r	#
+#		r + r*s*(A1+s*(A2+ ... + s*A7)),	s = r*r.	#
+#		Exit.							#
+#									#
+#	7. If |X| > 1, go to 9.						#
+#									#
+#	8. (|X|<2**(-40)) If SIN is invoked, return X;			#
+#		otherwise return 1.					#
+#									#
+#	9. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi,		#
+#		go back to 3.						#
+#									#
+#	SINCOS:								#
+#	1. If |X| >= 15Pi or |X| < 2**(-40), go to 6.			#
+#									#
+#	2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let	#
+#		k = N mod 4, so in particular, k = 0,1,2,or 3.		#
+#									#
+#	3. If k is even, go to 5.					#
+#									#
+#	4. (k is odd) Set j1 := (k-1)/2, j2 := j1 (EOR) (k mod 2), i.e.	#
+#		j1 exclusive or with the l.s.b. of k.			#
+#		sgn1 := (-1)**j1, sgn2 := (-1)**j2.			#
+#		SIN(X) = sgn1 * cos(r) and COS(X) = sgn2*sin(r) where	#
+#		sin(r) and cos(r) are computed as odd and even		#
+#		polynomials in r, respectively. Exit			#
+#									#
+#	5. (k is even) Set j1 := k/2, sgn1 := (-1)**j1.			#
+#		SIN(X) = sgn1 * sin(r) and COS(X) = sgn1*cos(r) where	#
+#		sin(r) and cos(r) are computed as odd and even		#
+#		polynomials in r, respectively. Exit			#
+#									#
+#	6. If |X| > 1, go to 8.						#
+#									#
+#	7. (|X|<2**(-40)) SIN(X) = X and COS(X) = 1. Exit.		#
+#									#
+#	8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi,		#
+#		go back to 2.						#
+#									#
+#########################################################################
+
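+# As a rough C sketch of the SIN/COS steps above (illustrative only,
+# not from the Motorola sources: sin_poly()/cos_poly() stand for the
+# A- and B-polynomials tabulated below, double arithmetic stands in
+# for extended precision, the |X| >= 15Pi and |X| < 2**(-40) paths are
+# omitted, and the real code subtracts N*Pi/2 as a two-piece value
+# fetched from PITBL rather than as n*M_PI_2):
+#
+#	#include <math.h>
+#
+#	double sin_poly(double s);	/* odd-part polynomial, A1..A7 */
+#	double cos_poly(double s);	/* even-part polynomial, B1..B8 */
+#
+#	double sincos_kernel(double x, int adjn)	/* 0=sin, 1=cos */
+#	{
+#		int n = (int)nearbyint(x * (2.0 / M_PI));
+#		double r = x - n * M_PI_2;	/* |r| <= pi/4 */
+#		double s = r * r;
+#		int k = (n + adjn) & 3;
+#
+#		if (k & 1)	/* k odd: return sgn*cos(r) */
+#			return ((((k - 1) >> 1) & 1) ? -1.0 : 1.0) *
+#				(1.0 + s * cos_poly(s));
+#		else		/* k even: return sgn*sin(r) */
+#			return (((k >> 1) & 1) ? -1.0 : 1.0) *
+#				(r + r * s * sin_poly(s));
+#	}
+#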
+SINA7:	long		0xBD6AAA77,0xCCC994F5
+SINA6:	long		0x3DE61209,0x7AAE8DA1
+SINA5:	long		0xBE5AE645,0x2A118AE4
+SINA4:	long		0x3EC71DE3,0xA5341531
+SINA3:	long		0xBF2A01A0,0x1A018B59,0x00000000,0x00000000
+SINA2:	long		0x3FF80000,0x88888888,0x888859AF,0x00000000
+SINA1:	long		0xBFFC0000,0xAAAAAAAA,0xAAAAAA99,0x00000000
+
+COSB8:	long		0x3D2AC4D0,0xD6011EE3
+COSB7:	long		0xBDA9396F,0x9F45AC19
+COSB6:	long		0x3E21EED9,0x0612C972
+COSB5:	long		0xBE927E4F,0xB79D9FCF
+COSB4:	long		0x3EFA01A0,0x1A01D423,0x00000000,0x00000000
+COSB3:	long		0xBFF50000,0xB60B60B6,0x0B61D438,0x00000000
+COSB2:	long		0x3FFA0000,0xAAAAAAAA,0xAAAAAB5E
+COSB1:	long		0xBF000000
+
+	set		INARG,FP_SCR0
+
+	set		X,FP_SCR0
+#	set		XDCARE,X+2
+	set		XFRAC,X+4
+
+	set		RPRIME,FP_SCR0
+	set		SPRIME,FP_SCR1
+
+	set		POSNEG1,L_SCR1
+	set		TWOTO63,L_SCR1
+
+	set		ENDFLAG,L_SCR2
+	set		INT,L_SCR2
+
+	set		ADJN,L_SCR3
+
+############################################
+	global		ssin
+ssin:
+	mov.l		&0,ADJN(%a6)		# yes; SET ADJN TO 0
+	bra.b		SINBGN
+
+############################################
+	global		scos
+scos:
+	mov.l		&1,ADJN(%a6)		# yes; SET ADJN TO 1
+
+############################################
+SINBGN:
+#--SAVE FPCR, FP1. CHECK IF |X| IS TOO SMALL OR LARGE
+
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	fmov.x		%fp0,X(%a6)		# save input at X
+
+# "COMPACTIFY" X
+	mov.l		(%a0),%d1		# put exp in hi word
+	mov.w		4(%a0),%d1		# fetch hi(man)
+	and.l		&0x7FFFFFFF,%d1		# strip sign
+
+	cmpi.l		%d1,&0x3FD78000		# is |X| < 2**(-40)?
+	bge.b		SOK1			# no
+	bra.w		SINSM			# yes; input is very small
+
+SOK1:
+	cmp.l		%d1,&0x4004BC7E		# is |X| >= 15 PI?
+	blt.b		SINMAIN			# no
+	bra.w		SREDUCEX		# yes; input is very large
+
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+SINMAIN:
+	fmov.x		%fp0,%fp1
+	fmul.d		TWOBYPI(%pc),%fp1	# X*2/PI
+
+	lea		PITBL+0x200(%pc),%a1	# TABLE OF N*PI/2, N = -32,...,32
+
+	fmov.l		%fp1,INT(%a6)		# CONVERT TO INTEGER
+
+	mov.l		INT(%a6),%d1		# make a copy of N
+	asl.l		&4,%d1			# N *= 16
+	add.l		%d1,%a1			# tbl_addr = a1 + (N*16)
+
+# A1 IS THE ADDRESS OF N*PIBY2
+# ...WHICH IS IN TWO PIECES Y1 & Y2
+	fsub.x		(%a1)+,%fp0		# X-Y1
+	fsub.s		(%a1),%fp0		# fp0 = R = (X-Y1)-Y2
+
+SINCONT:
+#--continuation from REDUCEX
+
+#--GET N+ADJN AND SEE IF SIN(R) OR COS(R) IS NEEDED
+	mov.l		INT(%a6),%d1
+	add.l		ADJN(%a6),%d1		# SEE IF D0 IS ODD OR EVEN
+	ror.l		&1,%d1			# D0 WAS ODD IFF D0 IS NEGATIVE
+	cmp.l		%d1,&0
+	blt.w		COSPOLY
+
+#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
+#--THEN WE RETURN	SGN*SIN(R). SGN*SIN(R) IS COMPUTED BY
+#--R' + R'*S*(A1 + S(A2 + S(A3 + S(A4 + ... + SA7)))), WHERE
+#--R' = SGN*R, S=R*R. THIS CAN BE REWRITTEN AS
+#--R' + R'*S*( [A1+T(A3+T(A5+TA7))] + [S(A2+T(A4+TA6))])
+#--WHERE T=S*S.
+#--NOTE THAT A3 THROUGH A7 ARE STORED IN DOUBLE PRECISION
+#--WHILE A1 AND A2 ARE IN DOUBLE-EXTENDED FORMAT.
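+#
+# Illustratively, in C (a hedged sketch; A1..A7 stand for the SINA
+# coefficients above, rp for R' = SGN*R):
+#
+#	static double sin_r(double rp, double s)
+#	{
+#		double t    = s * s;
+#		double odd  = A1 + t * (A3 + t * (A5 + t * A7));
+#		double even = s * (A2 + t * (A4 + t * A6));
+#		return rp + rp * s * (odd + even);
+#	}
+#
+# Splitting the Horner chain in T = S*S keeps two independent chains
+# in flight, hiding FPU instruction latency; the cosine case below is
+# analogous.
+#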
+SINPOLY:
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmov.x		%fp0,X(%a6)		# X IS R
+	fmul.x		%fp0,%fp0		# FP0 IS S
+
+	fmov.d		SINA7(%pc),%fp3
+	fmov.d		SINA6(%pc),%fp2
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS T
+
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+# ...LEAST SIG. BIT OF D0 IN SIGN POSITION
+	eor.l		%d1,X(%a6)		# X IS NOW R'= SGN*R
+
+	fmul.x		%fp1,%fp3		# TA7
+	fmul.x		%fp1,%fp2		# TA6
+
+	fadd.d		SINA5(%pc),%fp3		# A5+TA7
+	fadd.d		SINA4(%pc),%fp2		# A4+TA6
+
+	fmul.x		%fp1,%fp3		# T(A5+TA7)
+	fmul.x		%fp1,%fp2		# T(A4+TA6)
+
+	fadd.d		SINA3(%pc),%fp3		# A3+T(A5+TA7)
+	fadd.x		SINA2(%pc),%fp2		# A2+T(A4+TA6)
+
+	fmul.x		%fp3,%fp1		# T(A3+T(A5+TA7))
+
+	fmul.x		%fp0,%fp2		# S(A2+T(A4+TA6))
+	fadd.x		SINA1(%pc),%fp1		# A1+T(A3+T(A5+TA7))
+	fmul.x		X(%a6),%fp0		# R'*S
+
+	fadd.x		%fp2,%fp1		# [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]
+
+	fmul.x		%fp1,%fp0		# SIN(R')-R'
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fadd.x		X(%a6),%fp0		# last inst - possible exception set
+	bra		t_inx2
+
+#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
+#--THEN WE RETURN	SGN*COS(R). SGN*COS(R) IS COMPUTED BY
+#--SGN + S'*(B1 + S(B2 + S(B3 + S(B4 + ... + SB8)))), WHERE
+#--S=R*R AND S'=SGN*S. THIS CAN BE REWRITTEN AS
+#--SGN + S'*([B1+T(B3+T(B5+TB7))] + [S(B2+T(B4+T(B6+TB8)))])
+#--WHERE T=S*S.
+#--NOTE THAT B4 THROUGH B8 ARE STORED IN DOUBLE PRECISION
+#--WHILE B2 AND B3 ARE IN DOUBLE-EXTENDED FORMAT, B1 IS -1/2
+#--AND IS THEREFORE STORED AS SINGLE PRECISION.
+COSPOLY:
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmul.x		%fp0,%fp0		# FP0 IS S
+
+	fmov.d		COSB8(%pc),%fp2
+	fmov.d		COSB7(%pc),%fp3
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS T
+
+	fmov.x		%fp0,X(%a6)		# X IS S
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+# ...LEAST SIG. BIT OF D0 IN SIGN POSITION
+
+	fmul.x		%fp1,%fp2		# TB8
+
+	eor.l		%d1,X(%a6)		# X IS NOW S'= SGN*S
+	and.l		&0x80000000,%d1
+
+	fmul.x		%fp1,%fp3		# TB7
+
+	or.l		&0x3F800000,%d1		# D0 IS SGN IN SINGLE
+	mov.l		%d1,POSNEG1(%a6)
+
+	fadd.d		COSB6(%pc),%fp2		# B6+TB8
+	fadd.d		COSB5(%pc),%fp3		# B5+TB7
+
+	fmul.x		%fp1,%fp2		# T(B6+TB8)
+	fmul.x		%fp1,%fp3		# T(B5+TB7)
+
+	fadd.d		COSB4(%pc),%fp2		# B4+T(B6+TB8)
+	fadd.x		COSB3(%pc),%fp3		# B3+T(B5+TB7)
+
+	fmul.x		%fp1,%fp2		# T(B4+T(B6+TB8))
+	fmul.x		%fp3,%fp1		# T(B3+T(B5+TB7))
+
+	fadd.x		COSB2(%pc),%fp2		# B2+T(B4+T(B6+TB8))
+	fadd.s		COSB1(%pc),%fp1		# B1+T(B3+T(B5+TB7))
+
+	fmul.x		%fp2,%fp0		# S(B2+T(B4+T(B6+TB8)))
+
+	fadd.x		%fp1,%fp0
+
+	fmul.x		X(%a6),%fp0
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fadd.s		POSNEG1(%a6),%fp0	# last inst - possible exception set
+	bra		t_inx2
+
+##############################################
+
+# SINe: Big OR Small?
+#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+#--IF |X| < 2**(-40), RETURN X OR 1.
+SINBORS:
+	cmp.l		%d1,&0x3FFF8000
+	bgt.l		SREDUCEX
+
+SINSM:
+	mov.l		ADJN(%a6),%d1
+	cmp.l		%d1,&0
+	bgt.b		COSTINY
+
+# here, the operation may underflow iff the precision is sgl or dbl.
+# extended denorms are handled through another entry point.
+SINTINY:
+#	mov.w		&0x0000,XDCARE(%a6)	# JUST IN CASE
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0		# last inst - possible exception set
+	bra		t_catch
+
+COSTINY:
+	fmov.s		&0x3F800000,%fp0	# fp0 = 1.0
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fadd.s		&0x80800000,%fp0	# last inst - possible exception set
+	bra		t_pinx2
+
+################################################
+	global		ssind
+#--SIN(X) = X FOR DENORMALIZED X
+ssind:
+	bra		t_extdnrm
+
+############################################
+	global		scosd
+#--COS(X) = 1 FOR DENORMALIZED X
+scosd:
+	fmov.s		&0x3F800000,%fp0	# fp0 = 1.0
+	bra		t_pinx2
+
+##################################################
+
+	global		ssincos
+ssincos:
+#--SET ADJN TO 4
+	mov.l		&4,ADJN(%a6)
+
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	fmov.x		%fp0,X(%a6)
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1		# COMPACTIFY X
+
+	cmp.l		%d1,&0x3FD78000		# |X| >= 2**(-40)?
+	bge.b		SCOK1
+	bra.w		SCSM
+
+SCOK1:
+	cmp.l		%d1,&0x4004BC7E		# |X| < 15 PI?
+	blt.b		SCMAIN
+	bra.w		SREDUCEX
+
+
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+SCMAIN:
+	fmov.x		%fp0,%fp1
+
+	fmul.d		TWOBYPI(%pc),%fp1	# X*2/PI
+
+	lea		PITBL+0x200(%pc),%a1	# TABLE OF N*PI/2, N = -32,...,32
+
+	fmov.l		%fp1,INT(%a6)		# CONVERT TO INTEGER
+
+	mov.l		INT(%a6),%d1
+	asl.l		&4,%d1
+	add.l		%d1,%a1			# ADDRESS OF N*PIBY2, IN Y1, Y2
+
+	fsub.x		(%a1)+,%fp0		# X-Y1
+	fsub.s		(%a1),%fp0		# FP0 IS R = (X-Y1)-Y2
+
+SCCONT:
+#--continuation point from REDUCEX
+
+	mov.l		INT(%a6),%d1
+	ror.l		&1,%d1
+	cmp.l		%d1,&0			# D0 < 0 IFF N IS ODD
+	bge.w		NEVEN
+
+SNODD:
+#--REGISTERS SAVED SO FAR: D0, A0, FP2.
+	fmovm.x		&0x04,-(%sp)		# save fp2
+
+	fmov.x		%fp0,RPRIME(%a6)
+	fmul.x		%fp0,%fp0		# FP0 IS S = R*R
+	fmov.d		SINA7(%pc),%fp1		# A7
+	fmov.d		COSB8(%pc),%fp2		# B8
+	fmul.x		%fp0,%fp1		# SA7
+	fmul.x		%fp0,%fp2		# SB8
+
+	mov.l		%d2,-(%sp)
+	mov.l		%d1,%d2
+	ror.l		&1,%d2
+	and.l		&0x80000000,%d2
+	eor.l		%d1,%d2
+	and.l		&0x80000000,%d2
+
+	fadd.d		SINA6(%pc),%fp1		# A6+SA7
+	fadd.d		COSB7(%pc),%fp2		# B7+SB8
+
+	fmul.x		%fp0,%fp1		# S(A6+SA7)
+	eor.l		%d2,RPRIME(%a6)
+	mov.l		(%sp)+,%d2
+	fmul.x		%fp0,%fp2		# S(B7+SB8)
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+	mov.l		&0x3F800000,POSNEG1(%a6)
+	eor.l		%d1,POSNEG1(%a6)
+
+	fadd.d		SINA5(%pc),%fp1		# A5+S(A6+SA7)
+	fadd.d		COSB6(%pc),%fp2		# B6+S(B7+SB8)
+
+	fmul.x		%fp0,%fp1		# S(A5+S(A6+SA7))
+	fmul.x		%fp0,%fp2		# S(B6+S(B7+SB8))
+	fmov.x		%fp0,SPRIME(%a6)
+
+	fadd.d		SINA4(%pc),%fp1		# A4+S(A5+S(A6+SA7))
+	eor.l		%d1,SPRIME(%a6)
+	fadd.d		COSB5(%pc),%fp2		# B5+S(B6+S(B7+SB8))
+
+	fmul.x		%fp0,%fp1		# S(A4+...)
+	fmul.x		%fp0,%fp2		# S(B5+...)
+
+	fadd.d		SINA3(%pc),%fp1		# A3+S(A4+...)
+	fadd.d		COSB4(%pc),%fp2		# B4+S(B5+...)
+
+	fmul.x		%fp0,%fp1		# S(A3+...)
+	fmul.x		%fp0,%fp2		# S(B4+...)
+
+	fadd.x		SINA2(%pc),%fp1		# A2+S(A3+...)
+	fadd.x		COSB3(%pc),%fp2		# B3+S(B4+...)
+
+	fmul.x		%fp0,%fp1		# S(A2+...)
+	fmul.x		%fp0,%fp2		# S(B3+...)
+
+	fadd.x		SINA1(%pc),%fp1		# A1+S(A2+...)
+	fadd.x		COSB2(%pc),%fp2		# B2+S(B3+...)
+
+	fmul.x		%fp0,%fp1		# S(A1+...)
+	fmul.x		%fp2,%fp0		# S(B2+...)
+
+	fmul.x		RPRIME(%a6),%fp1	# R'S(A1+...)
+	fadd.s		COSB1(%pc),%fp0		# B1+S(B2...)
+	fmul.x		SPRIME(%a6),%fp0	# S'(B1+S(B2+...))
+
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+
+	fmov.l		%d0,%fpcr
+	fadd.x		RPRIME(%a6),%fp1	# COS(X)
+	bsr		sto_cos			# store cosine result
+	fadd.s		POSNEG1(%a6),%fp0	# SIN(X)
+	bra		t_inx2
+
+NEVEN:
+#--REGISTERS SAVED SO FAR: FP2.
+	fmovm.x		&0x04,-(%sp)		# save fp2
+
+	fmov.x		%fp0,RPRIME(%a6)
+	fmul.x		%fp0,%fp0		# FP0 IS S = R*R
+
+	fmov.d		COSB8(%pc),%fp1		# B8
+	fmov.d		SINA7(%pc),%fp2		# A7
+
+	fmul.x		%fp0,%fp1		# SB8
+	fmov.x		%fp0,SPRIME(%a6)
+	fmul.x		%fp0,%fp2		# SA7
+
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+
+	fadd.d		COSB7(%pc),%fp1		# B7+SB8
+	fadd.d		SINA6(%pc),%fp2		# A6+SA7
+
+	eor.l		%d1,RPRIME(%a6)
+	eor.l		%d1,SPRIME(%a6)
+
+	fmul.x		%fp0,%fp1		# S(B7+SB8)
+
+	or.l		&0x3F800000,%d1
+	mov.l		%d1,POSNEG1(%a6)
+
+	fmul.x		%fp0,%fp2		# S(A6+SA7)
+
+	fadd.d		COSB6(%pc),%fp1		# B6+S(B7+SB8)
+	fadd.d		SINA5(%pc),%fp2		# A5+S(A6+SA7)
+
+	fmul.x		%fp0,%fp1		# S(B6+S(B7+SB8))
+	fmul.x		%fp0,%fp2		# S(A5+S(A6+SA7))
+
+	fadd.d		COSB5(%pc),%fp1		# B5+S(B6+S(B7+SB8))
+	fadd.d		SINA4(%pc),%fp2		# A4+S(A5+S(A6+SA7))
+
+	fmul.x		%fp0,%fp1		# S(B5+...)
+	fmul.x		%fp0,%fp2		# S(A4+...)
+
+	fadd.d		COSB4(%pc),%fp1		# B4+S(B5+...)
+	fadd.d		SINA3(%pc),%fp2		# A3+S(A4+...)
+
+	fmul.x		%fp0,%fp1		# S(B4+...)
+	fmul.x		%fp0,%fp2		# S(A3+...)
+
+	fadd.x		COSB3(%pc),%fp1		# B3+S(B4+...)
+	fadd.x		SINA2(%pc),%fp2		# A2+S(A3+...)
+
+	fmul.x		%fp0,%fp1		# S(B3+...)
+	fmul.x		%fp0,%fp2		# S(A2+...)
+
+	fadd.x		COSB2(%pc),%fp1		# B2+S(B3+...)
+	fadd.x		SINA1(%pc),%fp2		# A1+S(A2+...)
+
+	fmul.x		%fp0,%fp1		# S(B2+...)
+	fmul.x		%fp2,%fp0		# S(A1+...)
+
+
+	fadd.s		COSB1(%pc),%fp1		# B1+S(B2...)
+	fmul.x		RPRIME(%a6),%fp0	# R'S(A1+...)
+	fmul.x		SPRIME(%a6),%fp1	# S'(B1+S(B2+...))
+
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+
+	fmov.l		%d0,%fpcr
+	fadd.s		POSNEG1(%a6),%fp1	# COS(X)
+	bsr		sto_cos			# store cosine result
+	fadd.x		RPRIME(%a6),%fp0	# SIN(X)
+	bra		t_inx2
+
+################################################
+
+SCBORS:
+	cmp.l		%d1,&0x3FFF8000
+	bgt.w		SREDUCEX
+
+################################################
+
+SCSM:
+#	mov.w		&0x0000,XDCARE(%a6)
+	fmov.s		&0x3F800000,%fp1
+
+	fmov.l		%d0,%fpcr
+	fsub.s		&0x00800000,%fp1
+	bsr		sto_cos			# store cosine result
+	fmov.l		%fpcr,%d0		# d0 must have fpcr,too
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0
+	bra		t_catch
+
+##############################################
+
+	global		ssincosd
+#--SIN AND COS OF X FOR DENORMALIZED X
+ssincosd:
+	mov.l		%d0,-(%sp)		# save d0
+	fmov.s		&0x3F800000,%fp1
+	bsr		sto_cos			# store cosine result
+	mov.l		(%sp)+,%d0		# restore d0
+	bra		t_extdnrm
+
+############################################
+
+#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
+SREDUCEX:
+	fmovm.x		&0x3c,-(%sp)		# save {fp2-fp5}
+	mov.l		%d2,-(%sp)		# save d2
+	fmov.s		&0x00000000,%fp1	# fp1 = 0
+
+#--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
+#--there is a danger of unwanted overflow in first LOOP iteration.  In this
+#--case, reduce argument by one remainder step to make subsequent reduction
+#--safe.
+	cmp.l		%d1,&0x7ffeffff		# is arg dangerously large?
+	bne.b		SLOOP			# no
+
+# yes; create 2**16383*PI/2
+	mov.w		&0x7ffe,FP_SCR0_EX(%a6)
+	mov.l		&0xc90fdaa2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+
+# create low half of 2**16383*PI/2 at FP_SCR1
+	mov.w		&0x7fdc,FP_SCR1_EX(%a6)
+	mov.l		&0x85a308d3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+
+	ftest.x		%fp0			# test sign of argument
+	fblt.w		sred_neg
+
+	or.b		&0x80,FP_SCR0_EX(%a6)	# positive arg
+	or.b		&0x80,FP_SCR1_EX(%a6)
+sred_neg:
+	fadd.x		FP_SCR0(%a6),%fp0	# high part of reduction is exact
+	fmov.x		%fp0,%fp1		# save high result in fp1
+	fadd.x		FP_SCR1(%a6),%fp0	# low part of reduction
+	fsub.x		%fp0,%fp1		# determine low component of result
+	fadd.x		FP_SCR1(%a6),%fp1	# fp0/fp1 are reduced argument.
+
+#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+#--integer quotient will be stored in N
+#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
+SLOOP:
+	fmov.x		%fp0,INARG(%a6)		# +-2**K * F, 1 <= F < 2
+	mov.w		INARG(%a6),%d1
+	mov.l		%d1,%a1			# save a copy of D0
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x00003FFF,%d1		# d0 = K
+	cmp.l		%d1,&28
+	ble.b		SLASTLOOP
+SCONTLOOP:
+	sub.l		&27,%d1			# d0 = L := K-27
+	mov.b		&0,ENDFLAG(%a6)
+	bra.b		SWORK
+SLASTLOOP:
+	clr.l		%d1			# d0 = L := 0
+	mov.b		&1,ENDFLAG(%a6)
+
+SWORK:
+#--FIND THE REMAINDER OF (R,r) W.R.T.	2**L * (PI/2). L IS SO CHOSEN
+#--THAT	INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+#--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+	mov.l		&0x00003FFE,%d2		# BIASED EXP OF 2/PI
+	sub.l		%d1,%d2			# BIASED EXP OF 2**(-L)*(2/PI)
+
+	mov.l		&0xA2F9836E,FP_SCR0_HI(%a6)
+	mov.l		&0x4E44152A,FP_SCR0_LO(%a6)
+	mov.w		%d2,FP_SCR0_EX(%a6)	# FP_SCR0 = 2**(-L)*(2/PI)
+
+	fmov.x		%fp0,%fp2
+	fmul.x		FP_SCR0(%a6),%fp2	# fp2 = X * 2**(-L)*(2/PI)
+
+#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+#--FLOATING POINT FORMAT, THE TWO FMOVE'S	FMOVE.L FP <--> N
+#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+#--(SIGN(INARG)*2**63	+	FP2) - SIGN(INARG)*2**63 WILL GIVE
+#--US THE DESIRED VALUE IN FLOATING POINT.
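+#
+#--In C, the analogous trick for doubles uses 2**52 rather than the
+#--2**63 used here, since a double carries 53 mantissa bits where the
+#--extended format carries 64. A hedged sketch only, assuming IEEE-754
+#--doubles, round-to-nearest, and |x| < 2**51:
+#
+#	#include <math.h>
+#
+#	double round_to_int(double x)
+#	{
+#		double big = copysign(4503599627370496.0, x); /* 2**52 */
+#		return (x + big) - big;
+#	}
+#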
+	mov.l		%a1,%d2
+	swap		%d2
+	and.l		&0x80000000,%d2
+	or.l		&0x5F000000,%d2		# d2 = SIGN(INARG)*2**63 IN SGL
+	mov.l		%d2,TWOTO63(%a6)
+	fadd.s		TWOTO63(%a6),%fp2	# THE FRACTIONAL PART OF FP1 IS ROUNDED
+	fsub.s		TWOTO63(%a6),%fp2	# fp2 = N
+#	fint.x		%fp2
+
+#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
+	mov.l		%d1,%d2			# d2 = L
+
+	add.l		&0x00003FFF,%d2		# BIASED EXP OF 2**L * (PI/2)
+	mov.w		%d2,FP_SCR0_EX(%a6)
+	mov.l		&0xC90FDAA2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)		# FP_SCR0 = 2**(L) * Piby2_1
+
+	add.l		&0x00003FDD,%d1
+	mov.w		%d1,FP_SCR1_EX(%a6)
+	mov.l		&0x85A308D3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)		# FP_SCR1 = 2**(L) * Piby2_2
+
+	mov.b		ENDFLAG(%a6),%d1
+
+#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+#--P2 = 2**(L) * Piby2_2
+	fmov.x		%fp2,%fp4		# fp4 = N
+	fmul.x		FP_SCR0(%a6),%fp4	# fp4 = W = N*P1
+	fmov.x		%fp2,%fp5		# fp5 = N
+	fmul.x		FP_SCR1(%a6),%fp5	# fp5 = w = N*P2
+	fmov.x		%fp4,%fp3		# fp3 = W = N*P1
+
+#--we want P+p = W+w  but  |p| <= half ulp of P
+#--Then, we need to compute  A := R-P   and  a := r-p
+	fadd.x		%fp5,%fp3		# fp3 = P
+	fsub.x		%fp3,%fp4		# fp4 = W-P
+
+	fsub.x		%fp3,%fp0		# fp0 = A := R - P
+	fadd.x		%fp5,%fp4		# fp4 = p = (W-P)+w
+
+	fmov.x		%fp0,%fp3		# fp3 = A
+	fsub.x		%fp4,%fp1		# fp1 = a := r - p
+
+#--Now we need to normalize (A,a) to  "new (R,r)" where R+r = A+a but
+#--|r| <= half ulp of R.
+	fadd.x		%fp1,%fp0		# fp0 = R := A+a
+#--No need to calculate r if this is the last loop
+	cmp.b		%d1,&0
+	bgt.w		SRESTORE
+
+#--Need to calculate r
+	fsub.x		%fp0,%fp3		# fp3 = A-R
+	fadd.x		%fp3,%fp1		# fp1 = r := (A-R)+a
+	bra.w		SLOOP
+
+SRESTORE:
+	fmov.l		%fp2,INT(%a6)
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		(%sp)+,&0x3c		# restore {fp2-fp5}
+
+	mov.l		ADJN(%a6),%d1
+	cmp.l		%d1,&4
+
+	blt.w		SINCONT
+	bra.w		SCCONT
+
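+# The reduction loop above keeps the remainder as a two-word value
+# (R,r) and subtracts N*(2**L * Pi/2), itself split into a high piece
+# P1 and a low piece P2. One iteration corresponds roughly to this
+# hedged C sketch (illustrative names only; the real code runs in
+# extended precision):
+#
+#	static void reduce_step(double *R, double *r, double n,
+#				double p1, double p2)
+#	{
+#		double W = n * p1;	/* fp4 = N*P1 */
+#		double w = n * p2;	/* fp5 = N*P2 */
+#		double P = W + w;	/* fp3 */
+#		double p = (W - P) + w;	/* rounding error of P */
+#		double A = *R - P;
+#		double a = *r - p;
+#		*R = A + a;		/* renormalize so that */
+#		*r = (A - *R) + a;	/* |r| <= half an ulp of R */
+#	}
+#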
+#########################################################################
+# stan():  computes the tangent of a normalized input			#
+# stand(): computes the tangent of a denormalized input			#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = tan(X)							#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 3 ulps in 64 significant bits, i.e. #
+#	within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#	1. If |X| >= 15Pi or |X| < 2**(-40), go to 6.			#
+#									#
+#	2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let	#
+#		k = N mod 2, so in particular, k = 0 or 1.		#
+#									#
+#	3. If k is odd, go to 5.					#
+#									#
+#	4. (k is even) Tan(X) = tan(r) and tan(r) is approximated by a	#
+#		rational function U/V where				#
+#		U = r + r*s*(P1 + s*(P2 + s*P3)), and			#
+#		V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))),  s = r*r.	#
+#		Exit.							#
+#									#
+#	5. (k is odd) Tan(X) = -cot(r). Since tan(r) is approximated by #
+#		a rational function U/V where				#
+#		U = r + r*s*(P1 + s*(P2 + s*P3)), and			#
+#		V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r,	#
+#		-Cot(r) = -V/U. Exit.					#
+#									#
+#	6. If |X| > 1, go to 8.						#
+#									#
+#	7. (|X|<2**(-40)) Tan(X) = X. Exit.				#
+#									#
+#	8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back	#
+#		to 2.							#
+#									#
+#########################################################################
+
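+# A hedged C sketch of the algorithm above (illustrative only; P1..P3
+# and Q1..Q4 stand for the coefficients tabulated below, and the
+# |X| >= 15Pi and |X| < 2**(-40) paths are omitted):
+#
+#	#include <math.h>
+#
+#	double tan_kernel(double x)
+#	{
+#		int n = (int)nearbyint(x * (2.0 / M_PI));
+#		double r = x - n * M_PI_2;	/* |r| <= pi/4 */
+#		double s = r * r;
+#		double u = r + r * s * (P1 + s * (P2 + s * P3));
+#		double v = 1.0 + s * (Q1 + s * (Q2 + s * (Q3 + s * Q4)));
+#		return (n & 1) ? -v / u : u / v;	/* tan or -cot */
+#	}
+#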
+TANQ4:
+	long		0x3EA0B759,0xF50F8688
+TANP3:
+	long		0xBEF2BAA5,0xA8924F04
+
+TANQ3:
+	long		0xBF346F59,0xB39BA65F,0x00000000,0x00000000
+
+TANP2:
+	long		0x3FF60000,0xE073D3FC,0x199C4A00,0x00000000
+
+TANQ2:
+	long		0x3FF90000,0xD23CD684,0x15D95FA1,0x00000000
+
+TANP1:
+	long		0xBFFC0000,0x8895A6C5,0xFB423BCA,0x00000000
+
+TANQ1:
+	long		0xBFFD0000,0xEEF57E0D,0xA84BC8CE,0x00000000
+
+INVTWOPI:
+	long		0x3FFC0000,0xA2F9836E,0x4E44152A,0x00000000
+
+TWOPI1:
+	long		0x40010000,0xC90FDAA2,0x00000000,0x00000000
+TWOPI2:
+	long		0x3FDF0000,0x85A308D4,0x00000000,0x00000000
+
+#--N*PI/2, -32 <= N <= 32, IN A LEADING TERM IN EXT. AND TRAILING
+#--TERM IN SGL. NOTE THAT PI IS 64-BIT LONG, THUS N*PI/2 IS AT
+#--MOST 69 BITS LONG.
+#	global		PITBL
+PITBL:
+	long		0xC0040000,0xC90FDAA2,0x2168C235,0x21800000
+	long		0xC0040000,0xC2C75BCD,0x105D7C23,0xA0D00000
+	long		0xC0040000,0xBC7EDCF7,0xFF523611,0xA1E80000
+	long		0xC0040000,0xB6365E22,0xEE46F000,0x21480000
+	long		0xC0040000,0xAFEDDF4D,0xDD3BA9EE,0xA1200000
+	long		0xC0040000,0xA9A56078,0xCC3063DD,0x21FC0000
+	long		0xC0040000,0xA35CE1A3,0xBB251DCB,0x21100000
+	long		0xC0040000,0x9D1462CE,0xAA19D7B9,0xA1580000
+	long		0xC0040000,0x96CBE3F9,0x990E91A8,0x21E00000
+	long		0xC0040000,0x90836524,0x88034B96,0x20B00000
+	long		0xC0040000,0x8A3AE64F,0x76F80584,0xA1880000
+	long		0xC0040000,0x83F2677A,0x65ECBF73,0x21C40000
+	long		0xC0030000,0xFB53D14A,0xA9C2F2C2,0x20000000
+	long		0xC0030000,0xEEC2D3A0,0x87AC669F,0x21380000
+	long		0xC0030000,0xE231D5F6,0x6595DA7B,0xA1300000
+	long		0xC0030000,0xD5A0D84C,0x437F4E58,0x9FC00000
+	long		0xC0030000,0xC90FDAA2,0x2168C235,0x21000000
+	long		0xC0030000,0xBC7EDCF7,0xFF523611,0xA1680000
+	long		0xC0030000,0xAFEDDF4D,0xDD3BA9EE,0xA0A00000
+	long		0xC0030000,0xA35CE1A3,0xBB251DCB,0x20900000
+	long		0xC0030000,0x96CBE3F9,0x990E91A8,0x21600000
+	long		0xC0030000,0x8A3AE64F,0x76F80584,0xA1080000
+	long		0xC0020000,0xFB53D14A,0xA9C2F2C2,0x1F800000
+	long		0xC0020000,0xE231D5F6,0x6595DA7B,0xA0B00000
+	long		0xC0020000,0xC90FDAA2,0x2168C235,0x20800000
+	long		0xC0020000,0xAFEDDF4D,0xDD3BA9EE,0xA0200000
+	long		0xC0020000,0x96CBE3F9,0x990E91A8,0x20E00000
+	long		0xC0010000,0xFB53D14A,0xA9C2F2C2,0x1F000000
+	long		0xC0010000,0xC90FDAA2,0x2168C235,0x20000000
+	long		0xC0010000,0x96CBE3F9,0x990E91A8,0x20600000
+	long		0xC0000000,0xC90FDAA2,0x2168C235,0x1F800000
+	long		0xBFFF0000,0xC90FDAA2,0x2168C235,0x1F000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x9F000000
+	long		0x40000000,0xC90FDAA2,0x2168C235,0x9F800000
+	long		0x40010000,0x96CBE3F9,0x990E91A8,0xA0600000
+	long		0x40010000,0xC90FDAA2,0x2168C235,0xA0000000
+	long		0x40010000,0xFB53D14A,0xA9C2F2C2,0x9F000000
+	long		0x40020000,0x96CBE3F9,0x990E91A8,0xA0E00000
+	long		0x40020000,0xAFEDDF4D,0xDD3BA9EE,0x20200000
+	long		0x40020000,0xC90FDAA2,0x2168C235,0xA0800000
+	long		0x40020000,0xE231D5F6,0x6595DA7B,0x20B00000
+	long		0x40020000,0xFB53D14A,0xA9C2F2C2,0x9F800000
+	long		0x40030000,0x8A3AE64F,0x76F80584,0x21080000
+	long		0x40030000,0x96CBE3F9,0x990E91A8,0xA1600000
+	long		0x40030000,0xA35CE1A3,0xBB251DCB,0xA0900000
+	long		0x40030000,0xAFEDDF4D,0xDD3BA9EE,0x20A00000
+	long		0x40030000,0xBC7EDCF7,0xFF523611,0x21680000
+	long		0x40030000,0xC90FDAA2,0x2168C235,0xA1000000
+	long		0x40030000,0xD5A0D84C,0x437F4E58,0x1FC00000
+	long		0x40030000,0xE231D5F6,0x6595DA7B,0x21300000
+	long		0x40030000,0xEEC2D3A0,0x87AC669F,0xA1380000
+	long		0x40030000,0xFB53D14A,0xA9C2F2C2,0xA0000000
+	long		0x40040000,0x83F2677A,0x65ECBF73,0xA1C40000
+	long		0x40040000,0x8A3AE64F,0x76F80584,0x21880000
+	long		0x40040000,0x90836524,0x88034B96,0xA0B00000
+	long		0x40040000,0x96CBE3F9,0x990E91A8,0xA1E00000
+	long		0x40040000,0x9D1462CE,0xAA19D7B9,0x21580000
+	long		0x40040000,0xA35CE1A3,0xBB251DCB,0xA1100000
+	long		0x40040000,0xA9A56078,0xCC3063DD,0xA1FC0000
+	long		0x40040000,0xAFEDDF4D,0xDD3BA9EE,0x21200000
+	long		0x40040000,0xB6365E22,0xEE46F000,0xA1480000
+	long		0x40040000,0xBC7EDCF7,0xFF523611,0x21E80000
+	long		0x40040000,0xC2C75BCD,0x105D7C23,0x20D00000
+	long		0x40040000,0xC90FDAA2,0x2168C235,0xA1800000
+
+	set		INARG,FP_SCR0
+
+	set		TWOTO63,L_SCR1
+	set		INT,L_SCR1
+	set		ENDFLAG,L_SCR2
+
+	global		stan
+stan:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FD78000		# |X| >= 2**(-40)?
+	bge.b		TANOK1
+	bra.w		TANSM
+TANOK1:
+	cmp.l		%d1,&0x4004BC7E		# |X| < 15 PI?
+	blt.b		TANMAIN
+	bra.w		REDUCEX
+
+TANMAIN:
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+	fmov.x		%fp0,%fp1
+	fmul.d		TWOBYPI(%pc),%fp1	# X*2/PI
+
+	lea.l		PITBL+0x200(%pc),%a1	# TABLE OF N*PI/2, N = -32,...,32
+
+	fmov.l		%fp1,%d1		# CONVERT TO INTEGER
+
+	asl.l		&4,%d1
+	add.l		%d1,%a1			# ADDRESS N*PIBY2 IN Y1, Y2
+
+	fsub.x		(%a1)+,%fp0		# X-Y1
+
+	fsub.s		(%a1),%fp0		# FP0 IS R = (X-Y1)-Y2
+
+	ror.l		&5,%d1
+	and.l		&0x80000000,%d1		# D0 WAS ODD IFF D0 < 0
+
+TANCONT:
+	fmovm.x		&0x0c,-(%sp)		# save fp2,fp3
+
+	cmp.l		%d1,&0
+	blt.w		NODD
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# S = R*R
+
+	fmov.d		TANQ4(%pc),%fp3
+	fmov.d		TANP3(%pc),%fp2
+
+	fmul.x		%fp1,%fp3		# SQ4
+	fmul.x		%fp1,%fp2		# SP3
+
+	fadd.d		TANQ3(%pc),%fp3		# Q3+SQ4
+	fadd.x		TANP2(%pc),%fp2		# P2+SP3
+
+	fmul.x		%fp1,%fp3		# S(Q3+SQ4)
+	fmul.x		%fp1,%fp2		# S(P2+SP3)
+
+	fadd.x		TANQ2(%pc),%fp3		# Q2+S(Q3+SQ4)
+	fadd.x		TANP1(%pc),%fp2		# P1+S(P2+SP3)
+
+	fmul.x		%fp1,%fp3		# S(Q2+S(Q3+SQ4))
+	fmul.x		%fp1,%fp2		# S(P1+S(P2+SP3))
+
+	fadd.x		TANQ1(%pc),%fp3		# Q1+S(Q2+S(Q3+SQ4))
+	fmul.x		%fp0,%fp2		# RS(P1+S(P2+SP3))
+
+	fmul.x		%fp3,%fp1		# S(Q1+S(Q2+S(Q3+SQ4)))
+
+	fadd.x		%fp2,%fp0		# R+RS(P1+S(P2+SP3))
+
+	fadd.s		&0x3F800000,%fp1	# 1+S(Q1+...)
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2,fp3
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fdiv.x		%fp1,%fp0		# last inst - possible exception set
+	bra		t_inx2
+
+NODD:
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp0,%fp0		# S = R*R
+
+	fmov.d		TANQ4(%pc),%fp3
+	fmov.d		TANP3(%pc),%fp2
+
+	fmul.x		%fp0,%fp3		# SQ4
+	fmul.x		%fp0,%fp2		# SP3
+
+	fadd.d		TANQ3(%pc),%fp3		# Q3+SQ4
+	fadd.x		TANP2(%pc),%fp2		# P2+SP3
+
+	fmul.x		%fp0,%fp3		# S(Q3+SQ4)
+	fmul.x		%fp0,%fp2		# S(P2+SP3)
+
+	fadd.x		TANQ2(%pc),%fp3		# Q2+S(Q3+SQ4)
+	fadd.x		TANP1(%pc),%fp2		# P1+S(P2+SP3)
+
+	fmul.x		%fp0,%fp3		# S(Q2+S(Q3+SQ4))
+	fmul.x		%fp0,%fp2		# S(P1+S(P2+SP3))
+
+	fadd.x		TANQ1(%pc),%fp3		# Q1+S(Q2+S(Q3+SQ4))
+	fmul.x		%fp1,%fp2		# RS(P1+S(P2+SP3))
+
+	fmul.x		%fp3,%fp0		# S(Q1+S(Q2+S(Q3+SQ4)))
+
+	fadd.x		%fp2,%fp1		# R+RS(P1+S(P2+SP3))
+	fadd.s		&0x3F800000,%fp0	# 1+S(Q1+...)
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2,fp3
+
+	fmov.x		%fp1,-(%sp)
+	eor.l		&0x80000000,(%sp)
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fdiv.x		(%sp)+,%fp0		# last inst - possible exception set
+	bra		t_inx2
+
+TANBORS:
+#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+#--IF |X| < 2**(-40), RETURN X OR 1.
+	cmp.l		%d1,&0x3FFF8000
+	bgt.b		REDUCEX
+
+TANSM:
+	fmov.x		%fp0,-(%sp)
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		(%sp)+,%fp0		# last inst - possible exception set
+	bra		t_catch
+
+	global		stand
+#--TAN(X) = X FOR DENORMALIZED X
+stand:
+	bra		t_extdnrm
+
+#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
+REDUCEX:
+	fmovm.x		&0x3c,-(%sp)		# save {fp2-fp5}
+	mov.l		%d2,-(%sp)		# save d2
+	fmov.s		&0x00000000,%fp1	# fp1 = 0
+
+#--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
+#--there is a danger of unwanted overflow in first LOOP iteration.  In this
+#--case, reduce argument by one remainder step to make subsequent reduction
+#--safe.
+	cmp.l		%d1,&0x7ffeffff		# is arg dangerously large?
+	bne.b		LOOP			# no
+
+# yes; create 2**16383*PI/2
+	mov.w		&0x7ffe,FP_SCR0_EX(%a6)
+	mov.l		&0xc90fdaa2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+
+# create low half of 2**16383*PI/2 at FP_SCR1
+	mov.w		&0x7fdc,FP_SCR1_EX(%a6)
+	mov.l		&0x85a308d3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+
+	ftest.x		%fp0			# test sign of argument
+	fblt.w		red_neg
+
+	or.b		&0x80,FP_SCR0_EX(%a6)	# positive arg
+	or.b		&0x80,FP_SCR1_EX(%a6)
+red_neg:
+	fadd.x		FP_SCR0(%a6),%fp0	# high part of reduction is exact
+	fmov.x		%fp0,%fp1		# save high result in fp1
+	fadd.x		FP_SCR1(%a6),%fp0	# low part of reduction
+	fsub.x		%fp0,%fp1		# determine low component of result
+	fadd.x		FP_SCR1(%a6),%fp1	# fp0/fp1 are reduced argument.
+
+#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+#--integer quotient will be stored in N
+#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
+LOOP:
+	fmov.x		%fp0,INARG(%a6)		# +-2**K * F, 1 <= F < 2
+	mov.w		INARG(%a6),%d1
+	mov.l		%d1,%a1			# save a copy of D0
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x00003FFF,%d1		# d0 = K
+	cmp.l		%d1,&28
+	ble.b		LASTLOOP
+CONTLOOP:
+	sub.l		&27,%d1			# d0 = L := K-27
+	mov.b		&0,ENDFLAG(%a6)
+	bra.b		WORK
+LASTLOOP:
+	clr.l		%d1			# d0 = L := 0
+	mov.b		&1,ENDFLAG(%a6)
+
+WORK:
+#--FIND THE REMAINDER OF (R,r) W.R.T.	2**L * (PI/2). L IS SO CHOSEN
+#--THAT	INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+#--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+	mov.l		&0x00003FFE,%d2		# BIASED EXP OF 2/PI
+	sub.l		%d1,%d2			# BIASED EXP OF 2**(-L)*(2/PI)
+
+	mov.l		&0xA2F9836E,FP_SCR0_HI(%a6)
+	mov.l		&0x4E44152A,FP_SCR0_LO(%a6)
+	mov.w		%d2,FP_SCR0_EX(%a6)	# FP_SCR0 = 2**(-L)*(2/PI)
+
+	fmov.x		%fp0,%fp2
+	fmul.x		FP_SCR0(%a6),%fp2	# fp2 = X * 2**(-L)*(2/PI)
+
+#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+#--FLOATING POINT FORMAT, THE TWO FMOVE'S	FMOVE.L FP <--> N
+#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+#--(SIGN(INARG)*2**63	+	FP2) - SIGN(INARG)*2**63 WILL GIVE
+#--US THE DESIRED VALUE IN FLOATING POINT.
+	mov.l		%a1,%d2
+	swap		%d2
+	and.l		&0x80000000,%d2
+	or.l		&0x5F000000,%d2		# d2 = SIGN(INARG)*2**63 IN SGL
+	mov.l		%d2,TWOTO63(%a6)
+	fadd.s		TWOTO63(%a6),%fp2	# THE FRACTIONAL PART OF FP1 IS ROUNDED
+	fsub.s		TWOTO63(%a6),%fp2	# fp2 = N
+#	fintrz.x	%fp2,%fp2
+
+#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
+	mov.l		%d1,%d2			# d2 = L
+
+	add.l		&0x00003FFF,%d2		# BIASED EXP OF 2**L * (PI/2)
+	mov.w		%d2,FP_SCR0_EX(%a6)
+	mov.l		&0xC90FDAA2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)		# FP_SCR0 = 2**(L) * Piby2_1
+
+	add.l		&0x00003FDD,%d1
+	mov.w		%d1,FP_SCR1_EX(%a6)
+	mov.l		&0x85A308D3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)		# FP_SCR1 = 2**(L) * Piby2_2
+
+	mov.b		ENDFLAG(%a6),%d1
+
+#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+#--P2 = 2**(L) * Piby2_2
+	fmov.x		%fp2,%fp4		# fp4 = N
+	fmul.x		FP_SCR0(%a6),%fp4	# fp4 = W = N*P1
+	fmov.x		%fp2,%fp5		# fp5 = N
+	fmul.x		FP_SCR1(%a6),%fp5	# fp5 = w = N*P2
+	fmov.x		%fp4,%fp3		# fp3 = W = N*P1
+
+#--we want P+p = W+w  but  |p| <= half ulp of P
+#--Then, we need to compute  A := R-P   and  a := r-p
+	fadd.x		%fp5,%fp3		# fp3 = P
+	fsub.x		%fp3,%fp4		# fp4 = W-P
+
+	fsub.x		%fp3,%fp0		# fp0 = A := R - P
+	fadd.x		%fp5,%fp4		# fp4 = p = (W-P)+w
+
+	fmov.x		%fp0,%fp3		# fp3 = A
+	fsub.x		%fp4,%fp1		# fp1 = a := r - p
+
+#--Now we need to normalize (A,a) to  "new (R,r)" where R+r = A+a but
+#--|r| <= half ulp of R.
+	fadd.x		%fp1,%fp0		# fp0 = R := A+a
+#--No need to calculate r if this is the last loop
+	cmp.b		%d1,&0
+	bgt.w		RESTORE
+
+#--Need to calculate r
+	fsub.x		%fp0,%fp3		# fp3 = A-R
+	fadd.x		%fp3,%fp1		# fp1 = r := (A-R)+a
+	bra.w		LOOP
+
+RESTORE:
+	fmov.l		%fp2,INT(%a6)
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		(%sp)+,&0x3c		# restore {fp2-fp5}
+
+	mov.l		INT(%a6),%d1
+	ror.l		&1,%d1
+
+	bra.w		TANCONT
+
+#########################################################################
+# satan():  computes the arctangent of a normalized number		#
+# satand(): computes the arctangent of a denormalized number		#
+#									#
+# INPUT	*************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = arctan(X)							#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 2 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#	Step 1. If |X| >= 16 or |X| < 1/16, go to Step 5.		#
+#									#
+#	Step 2. Let X = sgn * 2**k * 1.xxxxxxxx...x.			#
+#		Note that k = -4, -3,..., or 3.				#
+#		Define F = sgn * 2**k * 1.xxxx1, i.e. the first 5	#
+#		significant bits of X with a bit-1 attached at the 6-th	#
+#		bit position. Define u to be u = (X-F) / (1 + X*F).	#
+#									#
+#	Step 3. Approximate arctan(u) by a polynomial poly.		#
+#									#
+#	Step 4. Return arctan(F) + poly, arctan(F) is fetched from a	#
+#		table of values calculated beforehand. Exit.		#
+#									#
+#	Step 5. If |X| >= 16, go to Step 7.				#
+#									#
+#	Step 6. Approximate arctan(X) by an odd polynomial in X. Exit.	#
+#									#
+#	Step 7. Define X' = -1/X. Approximate arctan(X') by an odd	#
+#		polynomial in X'.					#
+#		Arctan(X) = sign(X)*Pi/2 + arctan(X'). Exit.		#
+#									#
+#########################################################################
+
+ATANA3:	long		0xBFF6687E,0x314987D8
+ATANA2:	long		0x4002AC69,0x34A26DB3
+ATANA1:	long		0xBFC2476F,0x4E1DA28E
+
+ATANB6:	long		0x3FB34444,0x7F876989
+ATANB5:	long		0xBFB744EE,0x7FAF45DB
+ATANB4:	long		0x3FBC71C6,0x46940220
+ATANB3:	long		0xBFC24924,0x921872F9
+ATANB2:	long		0x3FC99999,0x99998FA9
+ATANB1:	long		0xBFD55555,0x55555555
+
+ATANC5:	long		0xBFB70BF3,0x98539E6A
+ATANC4:	long		0x3FBC7187,0x962D1D7D
+ATANC3:	long		0xBFC24924,0x827107B8
+ATANC2:	long		0x3FC99999,0x9996263E
+ATANC1:	long		0xBFD55555,0x55555536
+
+PPIBY2:	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+NPIBY2:	long		0xBFFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+PTINY:	long		0x00010000,0x80000000,0x00000000,0x00000000
+NTINY:	long		0x80010000,0x80000000,0x00000000,0x00000000
+
+ATANTBL:
+	long		0x3FFB0000,0x83D152C5,0x060B7A51,0x00000000
+	long		0x3FFB0000,0x8BC85445,0x65498B8B,0x00000000
+	long		0x3FFB0000,0x93BE4060,0x17626B0D,0x00000000
+	long		0x3FFB0000,0x9BB3078D,0x35AEC202,0x00000000
+	long		0x3FFB0000,0xA3A69A52,0x5DDCE7DE,0x00000000
+	long		0x3FFB0000,0xAB98E943,0x62765619,0x00000000
+	long		0x3FFB0000,0xB389E502,0xF9C59862,0x00000000
+	long		0x3FFB0000,0xBB797E43,0x6B09E6FB,0x00000000
+	long		0x3FFB0000,0xC367A5C7,0x39E5F446,0x00000000
+	long		0x3FFB0000,0xCB544C61,0xCFF7D5C6,0x00000000
+	long		0x3FFB0000,0xD33F62F8,0x2488533E,0x00000000
+	long		0x3FFB0000,0xDB28DA81,0x62404C77,0x00000000
+	long		0x3FFB0000,0xE310A407,0x8AD34F18,0x00000000
+	long		0x3FFB0000,0xEAF6B0A8,0x188EE1EB,0x00000000
+	long		0x3FFB0000,0xF2DAF194,0x9DBE79D5,0x00000000
+	long		0x3FFB0000,0xFABD5813,0x61D47E3E,0x00000000
+	long		0x3FFC0000,0x8346AC21,0x0959ECC4,0x00000000
+	long		0x3FFC0000,0x8B232A08,0x304282D8,0x00000000
+	long		0x3FFC0000,0x92FB70B8,0xD29AE2F9,0x00000000
+	long		0x3FFC0000,0x9ACF476F,0x5CCD1CB4,0x00000000
+	long		0x3FFC0000,0xA29E7630,0x4954F23F,0x00000000
+	long		0x3FFC0000,0xAA68C5D0,0x8AB85230,0x00000000
+	long		0x3FFC0000,0xB22DFFFD,0x9D539F83,0x00000000
+	long		0x3FFC0000,0xB9EDEF45,0x3E900EA5,0x00000000
+	long		0x3FFC0000,0xC1A85F1C,0xC75E3EA5,0x00000000
+	long		0x3FFC0000,0xC95D1BE8,0x28138DE6,0x00000000
+	long		0x3FFC0000,0xD10BF300,0x840D2DE4,0x00000000
+	long		0x3FFC0000,0xD8B4B2BA,0x6BC05E7A,0x00000000
+	long		0x3FFC0000,0xE0572A6B,0xB42335F6,0x00000000
+	long		0x3FFC0000,0xE7F32A70,0xEA9CAA8F,0x00000000
+	long		0x3FFC0000,0xEF888432,0x64ECEFAA,0x00000000
+	long		0x3FFC0000,0xF7170A28,0xECC06666,0x00000000
+	long		0x3FFD0000,0x812FD288,0x332DAD32,0x00000000
+	long		0x3FFD0000,0x88A8D1B1,0x218E4D64,0x00000000
+	long		0x3FFD0000,0x9012AB3F,0x23E4AEE8,0x00000000
+	long		0x3FFD0000,0x976CC3D4,0x11E7F1B9,0x00000000
+	long		0x3FFD0000,0x9EB68949,0x3889A227,0x00000000
+	long		0x3FFD0000,0xA5EF72C3,0x4487361B,0x00000000
+	long		0x3FFD0000,0xAD1700BA,0xF07A7227,0x00000000
+	long		0x3FFD0000,0xB42CBCFA,0xFD37EFB7,0x00000000
+	long		0x3FFD0000,0xBB303A94,0x0BA80F89,0x00000000
+	long		0x3FFD0000,0xC22115C6,0xFCAEBBAF,0x00000000
+	long		0x3FFD0000,0xC8FEF3E6,0x86331221,0x00000000
+	long		0x3FFD0000,0xCFC98330,0xB4000C70,0x00000000
+	long		0x3FFD0000,0xD6807AA1,0x102C5BF9,0x00000000
+	long		0x3FFD0000,0xDD2399BC,0x31252AA3,0x00000000
+	long		0x3FFD0000,0xE3B2A855,0x6B8FC517,0x00000000
+	long		0x3FFD0000,0xEA2D764F,0x64315989,0x00000000
+	long		0x3FFD0000,0xF3BF5BF8,0xBAD1A21D,0x00000000
+	long		0x3FFE0000,0x801CE39E,0x0D205C9A,0x00000000
+	long		0x3FFE0000,0x8630A2DA,0xDA1ED066,0x00000000
+	long		0x3FFE0000,0x8C1AD445,0xF3E09B8C,0x00000000
+	long		0x3FFE0000,0x91DB8F16,0x64F350E2,0x00000000
+	long		0x3FFE0000,0x97731420,0x365E538C,0x00000000
+	long		0x3FFE0000,0x9CE1C8E6,0xA0B8CDBA,0x00000000
+	long		0x3FFE0000,0xA22832DB,0xCADAAE09,0x00000000
+	long		0x3FFE0000,0xA746F2DD,0xB7602294,0x00000000
+	long		0x3FFE0000,0xAC3EC0FB,0x997DD6A2,0x00000000
+	long		0x3FFE0000,0xB110688A,0xEBDC6F6A,0x00000000
+	long		0x3FFE0000,0xB5BCC490,0x59ECC4B0,0x00000000
+	long		0x3FFE0000,0xBA44BC7D,0xD470782F,0x00000000
+	long		0x3FFE0000,0xBEA94144,0xFD049AAC,0x00000000
+	long		0x3FFE0000,0xC2EB4ABB,0x661628B6,0x00000000
+	long		0x3FFE0000,0xC70BD54C,0xE602EE14,0x00000000
+	long		0x3FFE0000,0xCD000549,0xADEC7159,0x00000000
+	long		0x3FFE0000,0xD48457D2,0xD8EA4EA3,0x00000000
+	long		0x3FFE0000,0xDB948DA7,0x12DECE3B,0x00000000
+	long		0x3FFE0000,0xE23855F9,0x69E8096A,0x00000000
+	long		0x3FFE0000,0xE8771129,0xC4353259,0x00000000
+	long		0x3FFE0000,0xEE57C16E,0x0D379C0D,0x00000000
+	long		0x3FFE0000,0xF3E10211,0xA87C3779,0x00000000
+	long		0x3FFE0000,0xF919039D,0x758B8D41,0x00000000
+	long		0x3FFE0000,0xFE058B8F,0x64935FB3,0x00000000
+	long		0x3FFF0000,0x8155FB49,0x7B685D04,0x00000000
+	long		0x3FFF0000,0x83889E35,0x49D108E1,0x00000000
+	long		0x3FFF0000,0x859CFA76,0x511D724B,0x00000000
+	long		0x3FFF0000,0x87952ECF,0xFF8131E7,0x00000000
+	long		0x3FFF0000,0x89732FD1,0x9557641B,0x00000000
+	long		0x3FFF0000,0x8B38CAD1,0x01932A35,0x00000000
+	long		0x3FFF0000,0x8CE7A8D8,0x301EE6B5,0x00000000
+	long		0x3FFF0000,0x8F46A39E,0x2EAE5281,0x00000000
+	long		0x3FFF0000,0x922DA7D7,0x91888487,0x00000000
+	long		0x3FFF0000,0x94D19FCB,0xDEDF5241,0x00000000
+	long		0x3FFF0000,0x973AB944,0x19D2A08B,0x00000000
+	long		0x3FFF0000,0x996FF00E,0x08E10B96,0x00000000
+	long		0x3FFF0000,0x9B773F95,0x12321DA7,0x00000000
+	long		0x3FFF0000,0x9D55CC32,0x0F935624,0x00000000
+	long		0x3FFF0000,0x9F100575,0x006CC571,0x00000000
+	long		0x3FFF0000,0xA0A9C290,0xD97CC06C,0x00000000
+	long		0x3FFF0000,0xA22659EB,0xEBC0630A,0x00000000
+	long		0x3FFF0000,0xA388B4AF,0xF6EF0EC9,0x00000000
+	long		0x3FFF0000,0xA4D35F10,0x61D292C4,0x00000000
+	long		0x3FFF0000,0xA60895DC,0xFBE3187E,0x00000000
+	long		0x3FFF0000,0xA72A51DC,0x7367BEAC,0x00000000
+	long		0x3FFF0000,0xA83A5153,0x0956168F,0x00000000
+	long		0x3FFF0000,0xA93A2007,0x7539546E,0x00000000
+	long		0x3FFF0000,0xAA9E7245,0x023B2605,0x00000000
+	long		0x3FFF0000,0xAC4C84BA,0x6FE4D58F,0x00000000
+	long		0x3FFF0000,0xADCE4A4A,0x606B9712,0x00000000
+	long		0x3FFF0000,0xAF2A2DCD,0x8D263C9C,0x00000000
+	long		0x3FFF0000,0xB0656F81,0xF22265C7,0x00000000
+	long		0x3FFF0000,0xB1846515,0x0F71496A,0x00000000
+	long		0x3FFF0000,0xB28AAA15,0x6F9ADA35,0x00000000
+	long		0x3FFF0000,0xB37B44FF,0x3766B895,0x00000000
+	long		0x3FFF0000,0xB458C3DC,0xE9630433,0x00000000
+	long		0x3FFF0000,0xB525529D,0x562246BD,0x00000000
+	long		0x3FFF0000,0xB5E2CCA9,0x5F9D88CC,0x00000000
+	long		0x3FFF0000,0xB692CADA,0x7ACA1ADA,0x00000000
+	long		0x3FFF0000,0xB736AEA7,0xA6925838,0x00000000
+	long		0x3FFF0000,0xB7CFAB28,0x7E9F7B36,0x00000000
+	long		0x3FFF0000,0xB85ECC66,0xCB219835,0x00000000
+	long		0x3FFF0000,0xB8E4FD5A,0x20A593DA,0x00000000
+	long		0x3FFF0000,0xB99F41F6,0x4AFF9BB5,0x00000000
+	long		0x3FFF0000,0xBA7F1E17,0x842BBE7B,0x00000000
+	long		0x3FFF0000,0xBB471285,0x7637E17D,0x00000000
+	long		0x3FFF0000,0xBBFABE8A,0x4788DF6F,0x00000000
+	long		0x3FFF0000,0xBC9D0FAD,0x2B689D79,0x00000000
+	long		0x3FFF0000,0xBD306A39,0x471ECD86,0x00000000
+	long		0x3FFF0000,0xBDB6C731,0x856AF18A,0x00000000
+	long		0x3FFF0000,0xBE31CAC5,0x02E80D70,0x00000000
+	long		0x3FFF0000,0xBEA2D55C,0xE33194E2,0x00000000
+	long		0x3FFF0000,0xBF0B10B7,0xC03128F0,0x00000000
+	long		0x3FFF0000,0xBF6B7A18,0xDACB778D,0x00000000
+	long		0x3FFF0000,0xBFC4EA46,0x63FA18F6,0x00000000
+	long		0x3FFF0000,0xC0181BDE,0x8B89A454,0x00000000
+	long		0x3FFF0000,0xC065B066,0xCFBF6439,0x00000000
+	long		0x3FFF0000,0xC0AE345F,0x56340AE6,0x00000000
+	long		0x3FFF0000,0xC0F22291,0x9CB9E6A7,0x00000000
+
+	set		X,FP_SCR0
+	set		XDCARE,X+2
+	set		XFRAC,X+4
+	set		XFRACLO,X+8
+
+	set		ATANF,FP_SCR1
+	set		ATANFHI,ATANF+4
+	set		ATANFLO,ATANF+8
+
+	global		satan
+#--ENTRY POINT FOR ATAN(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+satan:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	fmov.x		%fp0,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FFB8000		# |X| >= 1/16?
+	bge.b		ATANOK1
+	bra.w		ATANSM
+
+ATANOK1:
+	cmp.l		%d1,&0x4002FFFF		# |X| < 16 ?
+	ble.b		ATANMAIN
+	bra.w		ATANBIG
+
+#--THE MOST LIKELY CASE, |X| IN [1/16, 16). WE USE TABLE TECHNIQUE
+#--THE IDEA IS ATAN(X) = ATAN(F) + ATAN( [X-F] / [1+XF] ).
+#--SO IF F IS CHOSEN TO BE CLOSE TO X AND ATAN(F) IS STORED IN
+#--A TABLE, ALL WE NEED IS TO APPROXIMATE ATAN(U) WHERE
+#--U = (X-F)/(1+XF) IS SMALL (REMEMBER F IS CLOSE TO X). IT IS
+#--TRUE THAT A DIVIDE IS NOW NEEDED, BUT THE APPROXIMATION FOR
+#--ATAN(U) IS A VERY SHORT POLYNOMIAL AND THE INDEXING TO
+#--FETCH F AND SAVING OF REGISTERS CAN ALL BE HIDDEN UNDER THE
+#--DIVIDE. IN THE END THIS METHOD IS MUCH FASTER THAN A TRADITIONAL
+#--ONE. NOTE ALSO THAT A TRADITIONAL SCHEME THAT APPROXIMATES
+#--ATAN(X) DIRECTLY WILL NEED TO USE A RATIONAL APPROXIMATION
+#--(DIVISION NEEDED) ANYWAY BECAUSE A POLYNOMIAL APPROXIMATION
+#--WILL INVOLVE A VERY LONG POLYNOMIAL.
+
+#--NOW WE SEE X AS +-2^K * 1.BBBBBBB....B <- 1. + 63 BITS
+#--WE CHOSE F TO BE +-2^K * 1.BBBB1
+#--THAT IS IT MATCHES THE EXPONENT AND FIRST 5 BITS OF X, THE
+#--SIXTH BITS IS SET TO BE 1. SINCE K = -4, -3, ..., 3, THERE
+#--ARE ONLY 8 TIMES 16 = 2^7 = 128 |F|'S. SINCE ATAN(-|F|) IS
+#-- -ATAN(|F|), WE NEED TO STORE ONLY ATAN(|F|).
+
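+# A hedged C rendering of the F construction and identity above
+# (illustrative only: atan_tbl() stands for the ATANTBL lookup with
+# the sign of F attached, atan_poly() for the short polynomial sketched
+# further below, IEEE-754 doubles are assumed, and the masks are the
+# double-precision analogues of the extended-precision masks used
+# here):
+#
+#	#include <stdint.h>
+#	#include <string.h>
+#
+#	double atan_tbl(double f);	/* sign(f)*atan(|f|) */
+#	double atan_poly(double u);	/* atan(u) for small u */
+#
+#	double atan_mid(double x)	/* 1/16 <= |x| < 16 */
+#	{
+#		uint64_t b;
+#		double f, u;
+#
+#		memcpy(&b, &x, 8);
+#		b &= 0xFFFF800000000000ULL; /* sign, exp, 5 frac bits */
+#		b |= 0x0000400000000000ULL; /* attach 1 in 6th position */
+#		memcpy(&f, &b, 8);
+#
+#		u = (x - f) / (1.0 + x * f);
+#		return atan_tbl(f) + atan_poly(u);
+#	}
+#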
+ATANMAIN:
+
+	and.l		&0xF8000000,XFRAC(%a6)	# FIRST 5 BITS
+	or.l		&0x04000000,XFRAC(%a6)	# SET 6-TH BIT TO 1
+	mov.l		&0x00000000,XFRACLO(%a6) # LOCATION OF X IS NOW F
+
+	fmov.x		%fp0,%fp1		# FP1 IS X
+	fmul.x		X(%a6),%fp1		# FP1 IS X*F, NOTE THAT X*F > 0
+	fsub.x		X(%a6),%fp0		# FP0 IS X-F
+	fadd.s		&0x3F800000,%fp1	# FP1 IS 1 + X*F
+	fdiv.x		%fp1,%fp0		# FP0 IS U = (X-F)/(1+X*F)
+
+#--WHILE THE DIVISION IS TAKING ITS TIME, WE FETCH ATAN(|F|)
+#--CREATE ATAN(F) AND STORE IT IN ATANF, AND
+#--SAVE REGISTERS FP2.
+
+	mov.l		%d2,-(%sp)		# SAVE d2 TEMPORARILY
+	mov.l		%d1,%d2			# THE EXP AND 16 BITS OF X
+	and.l		&0x00007800,%d1		# 4 VARYING BITS OF F'S FRACTION
+	and.l		&0x7FFF0000,%d2		# EXPONENT OF F
+	sub.l		&0x3FFB0000,%d2		# K+4
+	asr.l		&1,%d2
+	add.l		%d2,%d1			# THE 7 BITS IDENTIFYING F
+	asr.l		&7,%d1			# INDEX INTO TBL OF ATAN(|F|)
+	lea		ATANTBL(%pc),%a1
+	add.l		%d1,%a1			# ADDRESS OF ATAN(|F|)
+	mov.l		(%a1)+,ATANF(%a6)
+	mov.l		(%a1)+,ATANFHI(%a6)
+	mov.l		(%a1)+,ATANFLO(%a6)	# ATANF IS NOW ATAN(|F|)
+	mov.l		X(%a6),%d1		# LOAD SIGN AND EXPO. AGAIN
+	and.l		&0x80000000,%d1		# SIGN(F)
+	or.l		%d1,ATANF(%a6)		# ATANF IS NOW SIGN(F)*ATAN(|F|)
+	mov.l		(%sp)+,%d2		# RESTORE d2
+
+#--THAT'S ALL I HAVE TO DO FOR NOW,
+#--BUT ALAS, THE DIVIDE IS STILL CRANKING!
+
+#--U IN FP0, WE ARE NOW READY TO COMPUTE ATAN(U) AS
+#--U + A1*U*V*(A2 + V*(A3 + V)), V = U*U
+#--THE POLYNOMIAL MAY LOOK STRANGE, BUT IS NEVERTHELESS CORRECT.
+#--THE NATURAL FORM IS U + U*V*(A1 + V*(A2 + V*A3))
+#--WHAT WE HAVE HERE IS MERELY	A1 = A3, A2 = A1/A3, A3 = A2/A3.
+#--THE REASON FOR THIS REARRANGEMENT IS TO MAKE THE INDEPENDENT
+#--PARTS A1*U*V AND (A2 + ... STUFF) MORE LOAD-BALANCED
+
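+# In C terms the rebalanced form above is (hedged sketch; A1..A3 are
+# the ATANA constants tabulated earlier):
+#
+#	static double atan_poly(double u)
+#	{
+#		double v = u * u;
+#		return u + (A1 * u * v) * (A2 + v * (A3 + v));
+#	}
+#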
+	fmovm.x		&0x04,-(%sp)		# save fp2
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1
+	fmov.d		ATANA3(%pc),%fp2
+	fadd.x		%fp1,%fp2		# A3+V
+	fmul.x		%fp1,%fp2		# V*(A3+V)
+	fmul.x		%fp0,%fp1		# U*V
+	fadd.d		ATANA2(%pc),%fp2	# A2+V*(A3+V)
+	fmul.d		ATANA1(%pc),%fp1	# A1*U*V
+	fmul.x		%fp2,%fp1		# A1*U*V*(A2+V*(A3+V))
+	fadd.x		%fp1,%fp0		# ATAN(U), FP1 RELEASED
+
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	fadd.x		ATANF(%a6),%fp0		# ATAN(X)
+	bra		t_inx2
+
+ATANBORS:
+#--|X| IS IN d0 IN COMPACT FORM. FP1, d0 SAVED.
+#--FP0 IS X AND |X| <= 1/16 OR |X| >= 16.
+	cmp.l		%d1,&0x3FFF8000
+	bgt.w		ATANBIG			# I.E. |X| >= 16
+
+ATANSM:
+#--|X| <= 1/16
+#--IF |X| < 2^(-40), RETURN X AS ANSWER. OTHERWISE, APPROXIMATE
+#--ATAN(X) BY X + X*Y*(B1+Y*(B2+Y*(B3+Y*(B4+Y*(B5+Y*B6)))))
+#--WHICH IS X + X*Y*( [B1+Z*(B3+Z*B5)] + [Y*(B2+Z*(B4+Z*B6))] )
+#--WHERE Y = X*X, AND Z = Y*Y.
+
+	cmp.l		%d1,&0x3FD78000
+	blt.w		ATANTINY
+
+#--COMPUTE POLYNOMIAL
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmul.x		%fp0,%fp0		# FP0 IS Y = X*X
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS Z = Y*Y
+
+	fmov.d		ATANB6(%pc),%fp2
+	fmov.d		ATANB5(%pc),%fp3
+
+	fmul.x		%fp1,%fp2		# Z*B6
+	fmul.x		%fp1,%fp3		# Z*B5
+
+	fadd.d		ATANB4(%pc),%fp2	# B4+Z*B6
+	fadd.d		ATANB3(%pc),%fp3	# B3+Z*B5
+
+	fmul.x		%fp1,%fp2		# Z*(B4+Z*B6)
+	fmul.x		%fp3,%fp1		# Z*(B3+Z*B5)
+
+	fadd.d		ATANB2(%pc),%fp2	# B2+Z*(B4+Z*B6)
+	fadd.d		ATANB1(%pc),%fp1	# B1+Z*(B3+Z*B5)
+
+	fmul.x		%fp0,%fp2		# Y*(B2+Z*(B4+Z*B6))
+	fmul.x		X(%a6),%fp0		# X*Y
+
+	fadd.x		%fp2,%fp1		# [B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]
+
+	fmul.x		%fp1,%fp0		# X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))])
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	fadd.x		X(%a6),%fp0
+	bra		t_inx2
+
+ATANTINY:
+#--|X| < 2^(-40), ATAN(X) = X
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0		# last inst - possible exception set
+
+	bra		t_catch
+
+ATANBIG:
+#--IF |X| > 2^(100), RETURN	SIGN(X)*(PI/2 - TINY). OTHERWISE,
+#--RETURN SIGN(X)*PI/2 + ATAN(-1/X).
+	cmp.l		%d1,&0x40638000
+	bgt.w		ATANHUGE
+
+#--APPROXIMATE ATAN(-1/X) BY
+#--X'+X'*Y*(C1+Y*(C2+Y*(C3+Y*(C4+Y*C5)))), X' = -1/X, Y = X'*X'
+#--THIS CAN BE RE-WRITTEN AS
+#--X'+X'*Y*( [C1+Z*(C3+Z*C5)] + [Y*(C2+Z*C4)] ), Z = Y*Y.
+
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmov.s		&0xBF800000,%fp1	# LOAD -1
+	fdiv.x		%fp0,%fp1		# FP1 IS -1/X
+
+#--DIVIDE IS STILL CRANKING
+
+	fmov.x		%fp1,%fp0		# FP0 IS X'
+	fmul.x		%fp0,%fp0		# FP0 IS Y = X'*X'
+	fmov.x		%fp1,X(%a6)		# X IS REALLY X'
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS Z = Y*Y
+
+	fmov.d		ATANC5(%pc),%fp3
+	fmov.d		ATANC4(%pc),%fp2
+
+	fmul.x		%fp1,%fp3		# Z*C5
+	fmul.x		%fp1,%fp2		# Z*C4
+
+	fadd.d		ATANC3(%pc),%fp3	# C3+Z*C5
+	fadd.d		ATANC2(%pc),%fp2	# C2+Z*C4
+
+	fmul.x		%fp3,%fp1		# Z*(C3+Z*C5), FP3 RELEASED
+	fmul.x		%fp0,%fp2		# Y*(C2+Z*C4)
+
+	fadd.d		ATANC1(%pc),%fp1	# C1+Z*(C3+Z*C5)
+	fmul.x		X(%a6),%fp0		# X'*Y
+
+	fadd.x		%fp2,%fp1		# [Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)]
+
+	fmul.x		%fp1,%fp0		# X'*Y*([C1+Z*(C3+Z*C5)]
+#					...	+[Y*(C2+Z*C4)])
+	fadd.x		X(%a6),%fp0
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	tst.b		(%a0)
+	bpl.b		pos_big
+
+neg_big:
+	fadd.x		NPIBY2(%pc),%fp0
+	bra		t_minx2
+
+pos_big:
+	fadd.x		PPIBY2(%pc),%fp0
+	bra		t_pinx2
+
+ATANHUGE:
+#--RETURN SIGN(X)*(PIBY2 - TINY) = SIGN(X)*PIBY2 - SIGN(X)*TINY
+	tst.b		(%a0)
+	bpl.b		pos_huge
+
+neg_huge:
+	fmov.x		NPIBY2(%pc),%fp0
+	fmov.l		%d0,%fpcr
+	fadd.x		PTINY(%pc),%fp0
+	bra		t_minx2
+
+pos_huge:
+	fmov.x		PPIBY2(%pc),%fp0
+	fmov.l		%d0,%fpcr
+	fadd.x		NTINY(%pc),%fp0
+	bra		t_pinx2
+
+	global		satand
+#--ENTRY POINT FOR ATAN(X) FOR DENORMALIZED ARGUMENT
+satand:
+	bra		t_extdnrm
+
+#########################################################################
+# sasin():  computes the inverse sine of a normalized input		#
+# sasind(): computes the inverse sine of a denormalized input		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = arcsin(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	ASIN								#
+#	1. If |X| >= 1, go to 3.					#
+#									#
+#	2. (|X| < 1) Calculate asin(X) by				#
+#		z := sqrt( [1-X][1+X] )					#
+#		asin(X) = atan( x / z ).				#
+#		Exit.							#
+#									#
+#	3. If |X| > 1, go to 5.						#
+#									#
+#	4. (|X| = 1) sgn := sign(X), return asin(X) := sgn * Pi/2. Exit.#
+#									#
+#	5. (|X| > 1) Generate an invalid operation by 0 * infinity.	#
+#		Exit.							#
+#									#
+#########################################################################
+
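+# The main |X| < 1 path reduces to this hedged C sketch (illustrative
+# only; the real code feeds the quotient back through satan above):
+#
+#	#include <math.h>
+#
+#	double asin_main(double x)	/* |x| < 1 */
+#	{
+#		double z = sqrt((1.0 - x) * (1.0 + x));
+#		return atan(x / z);
+#	}
+#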
+	global		sasin
+sasin:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x3FFF8000
+	bge.b		ASINBIG
+
+# This catch is added here for the '060 QSP. Originally, the call to
+# satan() would handle this case by causing the exception which would
+# not be caught until gen_except(). Now, with the exceptions being
+# detected inside of satan(), the exception would have been handled there
+# instead of inside sasin() as expected.
+	cmp.l		%d1,&0x3FD78000
+	blt.w		ASINTINY
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--ASIN(X) = ATAN( X / SQRT( (1-X)(1+X) ) )
+
+ASINMAIN:
+	fmov.s		&0x3F800000,%fp1
+	fsub.x		%fp0,%fp1		# 1-X
+	fmovm.x		&0x4,-(%sp)		#  {fp2}
+	fmov.s		&0x3F800000,%fp2
+	fadd.x		%fp0,%fp2		# 1+X
+	fmul.x		%fp2,%fp1		# (1+X)(1-X)
+	fmovm.x		(%sp)+,&0x20		#  {fp2}
+	fsqrt.x		%fp1			# SQRT([1-X][1+X])
+	fdiv.x		%fp1,%fp0		# X/SQRT([1-X][1+X])
+	fmovm.x		&0x01,-(%sp)		# save X/SQRT(...)
+	lea		(%sp),%a0		# pass ptr to X/SQRT(...)
+	bsr		satan
+	add.l		&0xc,%sp		# clear X/SQRT(...) from stack
+	bra		t_inx2
+
+ASINBIG:
+	fabs.x		%fp0			# |X|
+	fcmp.s		%fp0,&0x3F800000
+	fbgt		t_operr			# cause an operr exception
+
+#--|X| = 1, ASIN(X) = +- PI/2.
+ASINONE:
+	fmov.x		PIBY2(%pc),%fp0
+	mov.l		(%a0),%d1
+	and.l		&0x80000000,%d1		# SIGN BIT OF X
+	or.l		&0x3F800000,%d1		# +-1 IN SGL FORMAT
+	mov.l		%d1,-(%sp)		# push SIGN(X) IN SGL-FMT
+	fmov.l		%d0,%fpcr
+	fmul.s		(%sp)+,%fp0
+	bra		t_inx2
+
+#--|X| < 2^(-40), ASIN(X) = X
+ASINTINY:
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		(%a0),%fp0		# last inst - possible exception
+	bra		t_catch
+
+	global		sasind
+#--ASIN(X) = X FOR DENORMALIZED X
+sasind:
+	bra		t_extdnrm
+
+#########################################################################
+# sacos():  computes the inverse cosine of a normalized input		#
+# sacosd(): computes the inverse cosine of a denormalized input		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = arccos(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#	ACOS								#
+#	1. If |X| >= 1, go to 3.					#
+#									#
+#	2. (|X| < 1) Calculate acos(X) by				#
+#		z := (1-X) / (1+X)					#
+#		acos(X) = 2 * atan( sqrt(z) ).				#
+#		Exit.							#
+#									#
+#	3. If |X| > 1, go to 5.						#
+#									#
+#	4. (|X| = 1) If X > 0, return 0. Otherwise, return Pi. Exit.	#
+#									#
+#	5. (|X| > 1) Generate an invalid operation by 0 * infinity.	#
+#		Exit.							#
+#									#
+#########################################################################
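+
+# A corresponding C sketch of the main path (illustrative only; the
+# name acos_sketch() and the libm calls stand in for satan and fsqrt):
+#
+#	#include <math.h>
+#
+#	double acos_sketch(double x)
+#	{
+#		double ax = fabs(x);
+#		if (ax < 1.0) {			/* usual case, step 2 */
+#			double z = (1.0 - x) / (1.0 + x);
+#			return 2.0 * atan(sqrt(z));
+#		}
+#		if (ax == 1.0)			/* step 4 */
+#			return (x > 0.0) ? 0.0 : M_PI;
+#		return 0.0 * INFINITY;		/* step 5: invalid op -> NaN */
+#	}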
+
+	global		sacos
+sacos:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1		# pack exp w/ upper 16 fraction
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x3FFF8000
+	bge.b		ACOSBIG
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--ACOS(X) = 2 * ATAN(	SQRT( (1-X)/(1+X) ) )
+
+ACOSMAIN:
+	fmov.s		&0x3F800000,%fp1
+	fadd.x		%fp0,%fp1		# 1+X
+	fneg.x		%fp0			# -X
+	fadd.s		&0x3F800000,%fp0	# 1-X
+	fdiv.x		%fp1,%fp0		# (1-X)/(1+X)
+	fsqrt.x		%fp0			# SQRT((1-X)/(1+X))
+	mov.l		%d0,-(%sp)		# save original users fpcr
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save SQRT(...) to stack
+	lea		(%sp),%a0		# pass ptr to sqrt
+	bsr		satan			# ATAN(SQRT([1-X]/[1+X]))
+	add.l		&0xc,%sp		# clear SQRT(...) from stack
+
+	fmov.l		(%sp)+,%fpcr		# restore user's round prec,mode
+	fadd.x		%fp0,%fp0		# 2 * ATAN( STUFF )
+	bra		t_pinx2
+
+ACOSBIG:
+	fabs.x		%fp0
+	fcmp.s		%fp0,&0x3F800000
+	fbgt		t_operr			# cause an operr exception
+
+#--|X| = 1, ACOS(X) = 0 OR PI
+	tst.b		(%a0)			# is X positive or negative?
+	bpl.b		ACOSP1
+
+#--X = -1
+# Returns PI and an inexact exception
+ACOSM1:
+	fmov.x		PI(%pc),%fp0		# load PI
+	fmov.l		%d0,%fpcr		# load round mode,prec
+	fadd.s		&0x00800000,%fp0	# add a small value
+	bra		t_pinx2
+
+ACOSP1:
+	bra		ld_pzero		# answer is positive zero
+
+	global		sacosd
+#--ACOS(X) = PI/2 FOR DENORMALIZED X
+sacosd:
+	fmov.l		%d0,%fpcr		# load user's rnd mode/prec
+	fmov.x		PIBY2(%pc),%fp0
+	bra		t_pinx2
+
+#########################################################################
+# setox():    computes the exponential for a normalized input		#
+# setoxd():   computes the exponential for a denormalized input		#
+# setoxm1():  computes the exponential minus 1 for a normalized input	#
+# setoxm1d(): computes the exponential minus 1 for a denormalized input	#
+#									#
+# INPUT	*************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = exp(X) or exp(X)-1					#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 0.85 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM and IMPLEMENTATION **************************************** #
+#									#
+#	setoxd								#
+#	------								#
+#	Step 1.	Set ans := 1.0						#
+#									#
+#	Step 2.	Return	ans := ans + sign(X)*2^(-126). Exit.		#
+#	Notes:	This will always generate one exception -- inexact.	#
+#									#
+#									#
+#	setox								#
+#	-----								#
+#									#
+#	Step 1.	Filter out extreme cases of input argument.		#
+#		1.1	If |X| >= 2^(-65), go to Step 1.3.		#
+#		1.2	Go to Step 7.					#
+#		1.3	If |X| < 16380 log(2), go to Step 2.		#
+#		1.4	Go to Step 8.					#
+#	Notes:	The usual case should take the branches 1.1 -> 1.3 -> 2.#
+#		To avoid the use of floating-point comparisons, a	#
+#		compact representation of |X| is used. This format is a	#
+#		32-bit integer, the upper (more significant) 16 bits	#
+#		are the sign and biased exponent field of |X|; the	#
+#		lower 16 bits are the 16 most significant bits of the	#
+#		fraction of |X| (including the explicit bit). Hence,	#
+#		the comparisons in Steps 1.1 and 1.3 can be performed	#
+#		by integer comparison. Note also that the constant	#
+#		16380 log(2) used in Step 1.3 is also in the compact	#
+#		form. Thus taking the branch to Step 2 guarantees	#
+#		|X| < 16380 log(2). There is no harm to have a small	#
+#		number of cases where |X| is less than,	but close to,	#
+#		16380 log(2) and the branch to Step 9 is taken.		#
+#									#
+#	Step 2.	Calculate N = round-to-nearest-int( X * 64/log2 ).	#
+#		2.1	Set AdjFlag := 0 (indicates the branch 1.3 -> 2 #
+#			was taken)					#
+#		2.2	N := round-to-nearest-integer( X * 64/log2 ).	#
+#		2.3	Calculate	J = N mod 64; so J = 0,1,2,..., #
+#			or 63.						#
+#		2.4	Calculate	M = (N - J)/64; so N = 64M + J.	#
+#		2.5	Calculate the address of the stored value of	#
+#			2^(J/64).					#
+#		2.6	Create the value Scale = 2^M.			#
+#	Notes:	The calculation in 2.2 is really performed by		#
+#			Z := X * constant				#
+#			N := round-to-nearest-integer(Z)		#
+#		where							#
+#			constant := single-precision( 64/log 2 ).	#
+#									#
+#		Using a single-precision constant avoids memory		#
+#		access. Another effect of using a single-precision	#
+#		"constant" is that the calculated value Z is		#
+#									#
+#			Z = X*(64/log2)*(1+eps), |eps| <= 2^(-24).	#
+#									#
+#		This error has to be considered later in Steps 3 and 4.	#
+#									#
+#	Step 3.	Calculate X - N*log2/64.				#
+#		3.1	R := X + N*L1,					#
+#				where L1 := single-precision(-log2/64).	#
+#		3.2	R := R + N*L2,					#
+#				L2 := extended-precision(-log2/64 - L1).#
+#	Notes:	a) The way L1 and L2 are chosen ensures L1+L2		#
+#		approximate the value -log2/64 to 88 bits of accuracy.	#
+#		b) N*L1 is exact because N is no longer than 22 bits	#
+#		and L1 is no longer than 24 bits.			#
+#		c) The calculation X+N*L1 is also exact due to		#
+#		cancellation. Thus, R is practically X+N(L1+L2) to full	#
+#		64 bits.						#
+#		d) It is important to estimate how large can |R| be	#
+#		after Step 3.2.						#
+#									#
+#		N = rnd-to-int( X*64/log2 (1+eps) ), |eps|<=2^(-24)	#
+#		X*64/log2 (1+eps)	=	N + f,	|f| <= 0.5	#
+#		X*64/log2 - N	=	f - eps*X*64/log2		#
+#		X - N*log2/64	=	f*log2/64 - eps*X		#
+#									#
+#									#
+#		Now |X| <= 16446 log2, thus				#
+#									#
+#			|X - N*log2/64| <= (0.5 + 16446/2^(18))*log2/64	#
+#					<= 0.57 log2/64.		#
+#		 This bound will be used in Step 4.			#
+#									#
+#	Step 4.	Approximate exp(R)-1 by a polynomial			#
+#		p = R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))	#
+#	Notes:	a) In order to reduce memory access, the coefficients	#
+#		are made as "short" as possible: A1 (which is 1/2), A4	#
+#		and A5 are single precision; A2 and A3 are double	#
+#		precision.						#
+#		b) Even with the restrictions above,			#
+#		   |p - (exp(R)-1)| < 2^(-68.8) for all |R| <= 0.0062.	#
+#		Note that 0.0062 is slightly bigger than 0.57 log2/64.	#
+#		c) To fully utilize the pipeline, p is separated into	#
+#		two independent pieces of roughly equal complexities	#
+#			p = [ R + R*S*(A2 + S*A4) ]	+		#
+#				[ S*(A1 + S*(A3 + S*A5)) ]		#
+#		where S = R*R.						#
+#									#
+#	Step 5.	Compute 2^(J/64)*exp(R) = 2^(J/64)*(1+p) by		#
+#				ans := T + ( T*p + t)			#
+#		where T and t are the stored values for 2^(J/64).	#
+#	Notes:	2^(J/64) is stored as T and t where T+t approximates	#
+#		2^(J/64) to roughly 85 bits; T is in extended precision	#
+#		and t is in single precision. Note also that T is	#
+#		rounded to 62 bits so that the last two bits of T are	#
+#		zero. The reason for such a special form is that T-1,	#
+#		T-2, and T-8 will all be exact --- a property that will	#
+#		give much more accurate computation of the function	#
+#		EXPM1.							#
+#									#
+#	Step 6.	Reconstruction of exp(X)				#
+#			exp(X) = 2^M * 2^(J/64) * exp(R).		#
+#		6.1	If AdjFlag = 0, go to 6.3			#
+#		6.2	ans := ans * AdjScale				#
+#		6.3	Restore the user FPCR				#
+#		6.4	Return ans := ans * Scale. Exit.		#
+#	Notes:	If AdjFlag = 0, we have X = Mlog2 + Jlog2/64 + R,	#
+#		|M| <= 16380, and Scale = 2^M. Moreover, exp(X) will	#
+#		neither overflow nor underflow. If AdjFlag = 1, that	#
+#		means that						#
+#			X = (M1+M)log2 + Jlog2/64 + R, |M1+M| >= 16380.	#
+#		Hence, exp(X) may overflow or underflow or neither.	#
+#		When that is the case, AdjScale = 2^(M1) where M1 is	#
+#		approximately M. Thus 6.2 will never cause		#
+#		over/underflow. Possible exception in 6.4 is overflow	#
+#		or underflow. The inexact exception is not generated in	#
+#		6.4. Although one can argue that the inexact flag	#
+#		should always be raised, simulating that exception	#
+#		costs more than the flag is worth in practical use.	#
+#									#
+#	Step 7.	Return 1 + X.						#
+#		7.1	ans := X					#
+#		7.2	Restore user FPCR.				#
+#		7.3	Return ans := 1 + ans. Exit			#
+#	Notes:	For non-zero X, the inexact exception will always be	#
+#		raised by 7.3. That is the only exception raised by 7.3.#
+#		Note also that we use the FMOVEM instruction to move X	#
+#		in Step 7.1 to avoid unnecessary trapping. (Although	#
+#		the FMOVEM may not seem relevant since X is normalized,	#
+#		the precaution will be useful in the library version of	#
+#		this code where the separate entry for denormalized	#
+#		inputs will be done away with.)				#
+#									#
+#	Step 8.	Handle exp(X) where |X| >= 16380log2.			#
+#		8.1	If |X| > 16480 log2, go to Step 9.		#
+#		(mimic 2.2 - 2.6)					#
+#		8.2	N := round-to-integer( X * 64/log2 )		#
+#		8.3	Calculate J = N mod 64, J = 0,1,...,63		#
+#		8.4	K := (N-J)/64, M1 := truncate(K/2), M = K-M1,	#
+#			AdjFlag := 1.					#
+#		8.5	Calculate the address of the stored value	#
+#			2^(J/64).					#
+#		8.6	Create the values Scale = 2^M, AdjScale = 2^M1.	#
+#		8.7	Go to Step 3.					#
+#	Notes:	Refer to notes for 2.2 - 2.6.				#
+#									#
+#	Step 9.	Handle exp(X), |X| > 16480 log2.			#
+#		9.1	If X < 0, go to 9.3				#
+#		9.2	ans := Huge, go to 9.4				#
+#		9.3	ans := Tiny.					#
+#		9.4	Restore user FPCR.				#
+#		9.5	Return ans := ans * ans. Exit.			#
+#	Notes:	Exp(X) will surely overflow or underflow, depending on	#
+#		X's sign. "Huge" and "Tiny" are respectively large/tiny	#
+#		extended-precision numbers whose square over/underflow	#
+#		with an inexact result. Thus, 9.5 always raises the	#
+#		inexact together with either overflow or underflow.	#
+#									#
+#	setoxm1d							#
+#	--------							#
+#									#
+#	Step 1.	Set ans := 0						#
+#									#
+#	Step 2.	Return	ans := X + ans. Exit.				#
+#	Notes:	This will return X with the appropriate rounding	#
+#		 precision prescribed by the user FPCR.			#
+#									#
+#	setoxm1								#
+#	-------								#
+#									#
+#	Step 1.	Check |X|						#
+#		1.1	If |X| >= 1/4, go to Step 1.3.			#
+#		1.2	Go to Step 7.					#
+#		1.3	If |X| < 70 log(2), go to Step 2.		#
+#		1.4	Go to Step 10.					#
+#	Notes:	The usual case should take the branches 1.1 -> 1.3 -> 2.#
+#		However, it is conceivable that |X| can often be small	#
+#		because EXPM1 is intended to evaluate exp(X)-1		#
+#		accurately when |X| is small. For further details on	#
+#		the comparisons, see the notes on Step 1 of setox.	#
+#									#
+#	Step 2.	Calculate N = round-to-nearest-int( X * 64/log2 ).	#
+#		2.1	N := round-to-nearest-integer( X * 64/log2 ).	#
+#		2.2	Calculate	J = N mod 64; so J = 0,1,2,..., #
+#			or 63.						#
+#		2.3	Calculate	M = (N - J)/64; so N = 64M + J.	#
+#		2.4	Calculate the address of the stored value of	#
+#			2^(J/64).					#
+#		2.5	Create the values Sc = 2^M and			#
+#			OnebySc := -2^(-M).				#
+#	Notes:	See the notes on Step 2 of setox.			#
+#									#
+#	Step 3.	Calculate X - N*log2/64.				#
+#		3.1	R := X + N*L1,					#
+#				where L1 := single-precision(-log2/64).	#
+#		3.2	R := R + N*L2,					#
+#				L2 := extended-precision(-log2/64 - L1).#
+#	Notes:	Applying the analysis of Step 3 of setox in this case	#
+#		shows that |R| <= 0.0055 (note that |X| <= 70 log2 in	#
+#		this case).						#
+#									#
+#	Step 4.	Approximate exp(R)-1 by a polynomial			#
+#			p = R+R*R*(A1+R*(A2+R*(A3+R*(A4+R*(A5+R*A6)))))	#
+#	Notes:	a) In order to reduce memory access, the coefficients	#
+#		are made as "short" as possible: A1 (which is 1/2), A5	#
+#		and A6 are single precision; A2, A3 and A4 are double	#
+#		precision.						#
+#		b) Even with the restriction above,			#
+#			|p - (exp(R)-1)| <	|R| * 2^(-72.7)		#
+#		for all |R| <= 0.0055.					#
+#		c) To fully utilize the pipeline, p is separated into	#
+#		two independent pieces of roughly equal complexity	#
+#			p = [ R*S*(A2 + S*(A4 + S*A6)) ]	+	#
+#				[ R + S*(A1 + S*(A3 + S*A5)) ]		#
+#		where S = R*R.						#
+#									#
+#	Step 5.	Compute 2^(J/64)*p by					#
+#				p := T*p				#
+#		where T and t are the stored values for 2^(J/64).	#
+#	Notes:	2^(J/64) is stored as T and t where T+t approximates	#
+#		2^(J/64) to roughly 85 bits; T is in extended precision	#
+#		and t is in single precision. Note also that T is	#
+#		rounded to 62 bits so that the last two bits of T are	#
+#		zero. The reason for such a special form is that T-1,	#
+#		T-2, and T-8 will all be exact --- a property that will	#
+#		be exploited in Step 6 below. The total relative error	#
+#		in p is no bigger than 2^(-67.7) compared to the final	#
+#		result.							#
+#									#
+#	Step 6.	Reconstruction of exp(X)-1				#
+#			exp(X)-1 = 2^M * ( 2^(J/64) + p - 2^(-M) ).	#
+#		6.1	If M <= 63, go to Step 6.3.			#
+#		6.2	ans := T + (p + (t + OnebySc)). Go to 6.6	#
+#		6.3	If M >= -3, go to 6.5.				#
+#		6.4	ans := (T + (p + t)) + OnebySc. Go to 6.6	#
+#		6.5	ans := (T + OnebySc) + (p + t).			#
+#		6.6	Restore user FPCR.				#
+#		6.7	Return ans := Sc * ans. Exit.			#
+#	Notes:	The various arrangements of the expressions give	#
+#		accurate evaluations.					#
+#									#
+#	Step 7.	exp(X)-1 for |X| < 1/4.					#
+#		7.1	If |X| >= 2^(-65), go to Step 9.		#
+#		7.2	Go to Step 8.					#
+#									#
+#	Step 8.	Calculate exp(X)-1, |X| < 2^(-65).			#
+#		8.1	If |X| < 2^(-16312), go to 8.3.			#
+#		8.2	Restore FPCR; return ans := X - 2^(-16382).	#
+#			Exit.						#
+#		8.3	X := X * 2^(140).				#
+#		8.4	Restore FPCR; ans := ans - 2^(-16382).		#
+#			Return ans := ans * 2^(-140). Exit.		#
+#	Notes:	The idea is to return "X - tiny" under the user		#
+#		precision and rounding modes. To avoid unnecessary	#
+#		inefficiency, we stay away from denormalized numbers	#
+#		the best we can. For |X| >= 2^(-16312), the		#
+#		straightforward 8.2 generates the inexact exception as	#
+#		the case warrants.					#
+#									#
+#	Step 9.	Calculate exp(X)-1, |X| < 1/4, by a polynomial		#
+#			p = X + X*X*(B1 + X*(B2 + ... + X*B12))		#
+#	Notes:	a) In order to reduce memory access, the coefficients	#
+#		are made as "short" as possible: B1 (which is 1/2), B9	#
+#		to B12 are single precision; B3 to B8 are double	#
+#		precision; and B2 is double extended.			#
+#		b) Even with the restriction above,			#
+#			|p - (exp(X)-1)| < |X| 2^(-70.6)		#
+#		for all |X| <= 0.251.					#
+#		Note that 0.251 is slightly bigger than 1/4.		#
+#		c) To fully preserve accuracy, the polynomial is	#
+#		computed as						#
+#			X + ( S*B1 +	Q ) where S = X*X and		#
+#			Q	=	X*S*(B2 + X*(B3 + ... + X*B12))	#
+#		d) To fully utilize the pipeline, Q is separated into	#
+#		two independent pieces of roughly equal complexity	#
+#			Q = [ X*S*(B2 + S*(B4 + ... + S*B12)) ] +	#
+#				[ S*S*(B3 + S*(B5 + ... + S*B11)) ]	#
+#									#
+#	Step 10. Calculate exp(X)-1 for |X| >= 70 log 2.		#
+#		10.1 If X >= 70 log2, exp(X) - 1 = exp(X) for all	#
+#		practical purposes. Therefore, go to Step 1 of setox.	#
+#		10.2 If X <= -70log2, exp(X) - 1 = -1 for all practical	#
+#		purposes.						#
+#		ans := -1						#
+#		Restore user FPCR					#
+#		Return ans := ans + 2^(-126). Exit.			#
+#	Notes:	10.2 will always create an inexact and return -1 + tiny	#
+#		in the user rounding precision and mode.		#
+#									#
+#########################################################################
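+
+# The table-driven reduction of Steps 2-6 can be sketched in C. This is
+# illustrative only: nearbyint(), exp2(), expm1() and ldexp() from libm
+# stand in for the fmov.l conversion, the 64-entry 2^(J/64) table, the
+# short polynomial of Step 4, and the final scaling by 2^M.
+#
+#	#include <math.h>
+#
+#	double exp_sketch(double x)
+#	{
+#		int    n = (int)nearbyint(x * 64.0 / M_LN2);	/* 2.2 */
+#		int    j = ((n % 64) + 64) % 64;	/* J = N mod 64	*/
+#		int    m = (n - j) / 64;		/* N = 64M + J	*/
+#		/* 3.1-3.2: the package splits -log2/64 into L1 + L2 so
+#		   that N*L1 is exact; one multiply is used here. */
+#		double r = x - n * (M_LN2 / 64.0);
+#		double p = expm1(r);		/* 4: |r| <= ~0.0062 */
+#		/* 5-6: exp(X) = 2^M * 2^(J/64) * (1 + p) */
+#		return ldexp(exp2((double)j / 64.0) * (1.0 + p), m);
+#	}
+#
+# For setoxm1 the same reduction applies, but Step 6 reconstructs
+# exp(X)-1 = 2^M * ( 2^(J/64)*(1+p) - 2^(-M) ) instead.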
+
+L2:	long		0x3FDC0000,0x82E30865,0x4361C4C6,0x00000000
+
+EEXPA3:	long		0x3FA55555,0x55554CC1
+EEXPA2:	long		0x3FC55555,0x55554A54
+
+EM1A4:	long		0x3F811111,0x11174385
+EM1A3:	long		0x3FA55555,0x55554F5A
+
+EM1A2:	long		0x3FC55555,0x55555555,0x00000000,0x00000000
+
+EM1B8:	long		0x3EC71DE3,0xA5774682
+EM1B7:	long		0x3EFA01A0,0x19D7CB68
+
+EM1B6:	long		0x3F2A01A0,0x1A019DF3
+EM1B5:	long		0x3F56C16C,0x16C170E2
+
+EM1B4:	long		0x3F811111,0x11111111
+EM1B3:	long		0x3FA55555,0x55555555
+
+EM1B2:	long		0x3FFC0000,0xAAAAAAAA,0xAAAAAAAB
+	long		0x00000000
+
+TWO140:	long		0x48B00000,0x00000000
+TWON140:
+	long		0x37300000,0x00000000
+
+EEXPTBL:
+	long		0x3FFF0000,0x80000000,0x00000000,0x00000000
+	long		0x3FFF0000,0x8164D1F3,0xBC030774,0x9F841A9B
+	long		0x3FFF0000,0x82CD8698,0xAC2BA1D8,0x9FC1D5B9
+	long		0x3FFF0000,0x843A28C3,0xACDE4048,0xA0728369
+	long		0x3FFF0000,0x85AAC367,0xCC487B14,0x1FC5C95C
+	long		0x3FFF0000,0x871F6196,0x9E8D1010,0x1EE85C9F
+	long		0x3FFF0000,0x88980E80,0x92DA8528,0x9FA20729
+	long		0x3FFF0000,0x8A14D575,0x496EFD9C,0xA07BF9AF
+	long		0x3FFF0000,0x8B95C1E3,0xEA8BD6E8,0xA0020DCF
+	long		0x3FFF0000,0x8D1ADF5B,0x7E5BA9E4,0x205A63DA
+	long		0x3FFF0000,0x8EA4398B,0x45CD53C0,0x1EB70051
+	long		0x3FFF0000,0x9031DC43,0x1466B1DC,0x1F6EB029
+	long		0x3FFF0000,0x91C3D373,0xAB11C338,0xA0781494
+	long		0x3FFF0000,0x935A2B2F,0x13E6E92C,0x9EB319B0
+	long		0x3FFF0000,0x94F4EFA8,0xFEF70960,0x2017457D
+	long		0x3FFF0000,0x96942D37,0x20185A00,0x1F11D537
+	long		0x3FFF0000,0x9837F051,0x8DB8A970,0x9FB952DD
+	long		0x3FFF0000,0x99E04593,0x20B7FA64,0x1FE43087
+	long		0x3FFF0000,0x9B8D39B9,0xD54E5538,0x1FA2A818
+	long		0x3FFF0000,0x9D3ED9A7,0x2CFFB750,0x1FDE494D
+	long		0x3FFF0000,0x9EF53260,0x91A111AC,0x20504890
+	long		0x3FFF0000,0xA0B0510F,0xB9714FC4,0xA073691C
+	long		0x3FFF0000,0xA2704303,0x0C496818,0x1F9B7A05
+	long		0x3FFF0000,0xA43515AE,0x09E680A0,0xA0797126
+	long		0x3FFF0000,0xA5FED6A9,0xB15138EC,0xA071A140
+	long		0x3FFF0000,0xA7CD93B4,0xE9653568,0x204F62DA
+	long		0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x1F283C4A
+	long		0x3FFF0000,0xAB7A39B5,0xA93ED338,0x9F9A7FDC
+	long		0x3FFF0000,0xAD583EEA,0x42A14AC8,0xA05B3FAC
+	long		0x3FFF0000,0xAF3B78AD,0x690A4374,0x1FDF2610
+	long		0x3FFF0000,0xB123F581,0xD2AC2590,0x9F705F90
+	long		0x3FFF0000,0xB311C412,0xA9112488,0x201F678A
+	long		0x3FFF0000,0xB504F333,0xF9DE6484,0x1F32FB13
+	long		0x3FFF0000,0xB6FD91E3,0x28D17790,0x20038B30
+	long		0x3FFF0000,0xB8FBAF47,0x62FB9EE8,0x200DC3CC
+	long		0x3FFF0000,0xBAFF5AB2,0x133E45FC,0x9F8B2AE6
+	long		0x3FFF0000,0xBD08A39F,0x580C36C0,0xA02BBF70
+	long		0x3FFF0000,0xBF1799B6,0x7A731084,0xA00BF518
+	long		0x3FFF0000,0xC12C4CCA,0x66709458,0xA041DD41
+	long		0x3FFF0000,0xC346CCDA,0x24976408,0x9FDF137B
+	long		0x3FFF0000,0xC5672A11,0x5506DADC,0x201F1568
+	long		0x3FFF0000,0xC78D74C8,0xABB9B15C,0x1FC13A2E
+	long		0x3FFF0000,0xC9B9BD86,0x6E2F27A4,0xA03F8F03
+	long		0x3FFF0000,0xCBEC14FE,0xF2727C5C,0x1FF4907D
+	long		0x3FFF0000,0xCE248C15,0x1F8480E4,0x9E6E53E4
+	long		0x3FFF0000,0xD06333DA,0xEF2B2594,0x1FD6D45C
+	long		0x3FFF0000,0xD2A81D91,0xF12AE45C,0xA076EDB9
+	long		0x3FFF0000,0xD4F35AAB,0xCFEDFA20,0x9FA6DE21
+	long		0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x1EE69A2F
+	long		0x3FFF0000,0xD99D15C2,0x78AFD7B4,0x207F439F
+	long		0x3FFF0000,0xDBFBB797,0xDAF23754,0x201EC207
+	long		0x3FFF0000,0xDE60F482,0x5E0E9124,0x9E8BE175
+	long		0x3FFF0000,0xE0CCDEEC,0x2A94E110,0x20032C4B
+	long		0x3FFF0000,0xE33F8972,0xBE8A5A50,0x2004DFF5
+	long		0x3FFF0000,0xE5B906E7,0x7C8348A8,0x1E72F47A
+	long		0x3FFF0000,0xE8396A50,0x3C4BDC68,0x1F722F22
+	long		0x3FFF0000,0xEAC0C6E7,0xDD243930,0xA017E945
+	long		0x3FFF0000,0xED4F301E,0xD9942B84,0x1F401A5B
+	long		0x3FFF0000,0xEFE4B99B,0xDCDAF5CC,0x9FB9A9E3
+	long		0x3FFF0000,0xF281773C,0x59FFB138,0x20744C05
+	long		0x3FFF0000,0xF5257D15,0x2486CC2C,0x1F773A19
+	long		0x3FFF0000,0xF7D0DF73,0x0AD13BB8,0x1FFE90D5
+	long		0x3FFF0000,0xFA83B2DB,0x722A033C,0xA041ED22
+	long		0x3FFF0000,0xFD3E0C0C,0xF486C174,0x1F853F3A
+
+	set		ADJFLAG,L_SCR2
+	set		SCALE,FP_SCR0
+	set		ADJSCALE,FP_SCR1
+	set		SC,FP_SCR0
+	set		ONEBYSC,FP_SCR1
+
+	global		setox
+setox:
+#--entry point for EXP(X), here X is finite, non-zero, and not a NaN
+
+#--Step 1.
+	mov.l		(%a0),%d1		# load part of input X
+	and.l		&0x7FFF0000,%d1		# biased expo. of X
+	cmp.l		%d1,&0x3FBE0000		# 2^(-65)
+	bge.b		EXPC1			# normal case
+	bra		EXPSM
+
+EXPC1:
+#--The case |X| >= 2^(-65)
+	mov.w		4(%a0),%d1		# expo. and partial sig. of |X|
+	cmp.l		%d1,&0x400CB167		# 16380 log2 trunc. 16 bits
+	blt.b		EXPMAIN			# normal case
+	bra		EEXPBIG
+
+EXPMAIN:
+#--Step 2.
+#--This is the normal branch:	2^(-65) <= |X| < 16380 log2.
+	fmov.x		(%a0),%fp0		# load input from (a0)
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	mov.l		&0,ADJFLAG(%a6)
+	fmov.l		%fp0,%d1		# N = int( X * 64/log2 )
+	lea		EEXPTBL(%pc),%a1
+	fmov.l		%d1,%fp0		# convert to floating-format
+
+	mov.l		%d1,L_SCR1(%a6)		# save N temporarily
+	and.l		&0x3F,%d1		# D0 is J = N mod 64
+	lsl.l		&4,%d1
+	add.l		%d1,%a1			# address of 2^(J/64)
+	mov.l		L_SCR1(%a6),%d1
+	asr.l		&6,%d1			# D0 is M
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(M)
+	mov.w		L2(%pc),L_SCR1(%a6)	# prefetch L2, no need in CB
+
+EXPCONT1:
+#--Step 3.
+#--fp2/fp3 saved on the stack. fp0 is N, fp1 is X,
+#--a1 points to 2^(J/64), d1 is biased expo. of 2^(M)
+	fmov.x		%fp0,%fp2
+	fmul.s		&0xBC317218,%fp0	# N * L1, L1 = lead(-log2/64)
+	fmul.x		L2(%pc),%fp2		# N * L2, L1+L2 = -log2/64
+	fadd.x		%fp1,%fp0		# X + N*L1
+	fadd.x		%fp2,%fp0		# fp0 is R, reduced arg.
+
+#--Step 4.
+#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
+#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+#--[R+R*S*(A2+S*A4)] + [S*(A1+S*(A3+S*A5))]
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# fp1 IS S = R*R
+
+	fmov.s		&0x3AB60B70,%fp2	# fp2 IS A5
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*A5
+	fmov.x		%fp1,%fp3
+	fmul.s		&0x3C088895,%fp3	# fp3 IS S*A4
+
+	fadd.d		EEXPA3(%pc),%fp2	# fp2 IS A3+S*A5
+	fadd.d		EEXPA2(%pc),%fp3	# fp3 IS A2+S*A4
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A3+S*A5)
+	mov.w		%d1,SCALE(%a6)		# SCALE is 2^(M) in extended
+	mov.l		&0x80000000,SCALE+4(%a6)
+	clr.l		SCALE+8(%a6)
+
+	fmul.x		%fp1,%fp3		# fp3 IS S*(A2+S*A4)
+
+	fadd.s		&0x3F000000,%fp2	# fp2 IS A1+S*(A3+S*A5)
+	fmul.x		%fp0,%fp3		# fp3 IS R*S*(A2+S*A4)
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A1+S*(A3+S*A5))
+	fadd.x		%fp3,%fp0		# fp0 IS R+R*S*(A2+S*A4),
+
+	fmov.x		(%a1)+,%fp1		# fp1 is lead. pt. of 2^(J/64)
+	fadd.x		%fp2,%fp0		# fp0 is EXP(R) - 1
+
+#--Step 5
+#--final reconstruction process
+#--EXP(X) = 2^M * ( 2^(J/64) + 2^(J/64)*(EXP(R)-1) )
+
+	fmul.x		%fp1,%fp0		# 2^(J/64)*(Exp(R)-1)
+	fmovm.x		(%sp)+,&0x30		# fp2 restored {%fp2/%fp3}
+	fadd.s		(%a1),%fp0		# accurate 2^(J/64)
+
+	fadd.x		%fp1,%fp0		# 2^(J/64) + 2^(J/64)*...
+	mov.l		ADJFLAG(%a6),%d1
+
+#--Step 6
+	tst.l		%d1
+	beq.b		NORMAL
+ADJUST:
+	fmul.x		ADJSCALE(%a6),%fp0
+NORMAL:
+	fmov.l		%d0,%fpcr		# restore user FPCR
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		SCALE(%a6),%fp0		# multiply 2^(M)
+	bra		t_catch
+
+EXPSM:
+#--Step 7
+	fmovm.x		(%a0),&0x80		# load X
+	fmov.l		%d0,%fpcr
+	fadd.s		&0x3F800000,%fp0	# 1+X in user mode
+	bra		t_pinx2
+
+EEXPBIG:
+#--Step 8
+	cmp.l		%d1,&0x400CB27C		# 16480 log2
+	bgt.b		EXP2BIG
+#--Steps 8.2 -- 8.6
+	fmov.x		(%a0),%fp0		# load input from (a0)
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	mov.l		&1,ADJFLAG(%a6)
+	fmov.l		%fp0,%d1		# N = int( X * 64/log2 )
+	lea		EEXPTBL(%pc),%a1
+	fmov.l		%d1,%fp0		# convert to floating-format
+	mov.l		%d1,L_SCR1(%a6)		# save N temporarily
+	and.l		&0x3F,%d1		# D0 is J = N mod 64
+	lsl.l		&4,%d1
+	add.l		%d1,%a1			# address of 2^(J/64)
+	mov.l		L_SCR1(%a6),%d1
+	asr.l		&6,%d1			# D0 is K
+	mov.l		%d1,L_SCR1(%a6)		# save K temporarily
+	asr.l		&1,%d1			# D0 is M1
+	sub.l		%d1,L_SCR1(%a6)		# a1 is M
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(M1)
+	mov.w		%d1,ADJSCALE(%a6)	# ADJSCALE := 2^(M1)
+	mov.l		&0x80000000,ADJSCALE+4(%a6)
+	clr.l		ADJSCALE+8(%a6)
+	mov.l		L_SCR1(%a6),%d1		# D0 is M
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(M)
+	bra.w		EXPCONT1		# go back to Step 3
+
+EXP2BIG:
+#--Step 9
+	tst.b		(%a0)			# is X positive or negative?
+	bmi		t_unfl2
+	bra		t_ovfl2
+
+	global		setoxd
+setoxd:
+#--entry point for EXP(X), X is denormalized
+	mov.l		(%a0),-(%sp)
+	andi.l		&0x80000000,(%sp)
+	ori.l		&0x00800000,(%sp)	# sign(X)*2^(-126)
+
+	fmov.s		&0x3F800000,%fp0
+
+	fmov.l		%d0,%fpcr
+	fadd.s		(%sp)+,%fp0
+	bra		t_pinx2
+
+	global		setoxm1
+setoxm1:
+#--entry point for EXPM1(X), here X is finite, non-zero, non-NaN
+
+#--Step 1.
+#--Step 1.1
+	mov.l		(%a0),%d1		# load part of input X
+	and.l		&0x7FFF0000,%d1		# biased expo. of X
+	cmp.l		%d1,&0x3FFD0000		# 1/4
+	bge.b		EM1CON1			# |X| >= 1/4
+	bra		EM1SM
+
+EM1CON1:
+#--Step 1.3
+#--The case |X| >= 1/4
+	mov.w		4(%a0),%d1		# expo. and partial sig. of |X|
+	cmp.l		%d1,&0x4004C215		# 70log2 rounded up to 16 bits
+	ble.b		EM1MAIN			# 1/4 <= |X| <= 70log2
+	bra		EM1BIG
+
+EM1MAIN:
+#--Step 2.
+#--This is the case:	1/4 <= |X| <= 70 log2.
+	fmov.x		(%a0),%fp0		# load input from (a0)
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	fmov.l		%fp0,%d1		# N = int( X * 64/log2 )
+	lea		EEXPTBL(%pc),%a1
+	fmov.l		%d1,%fp0		# convert to floating-format
+
+	mov.l		%d1,L_SCR1(%a6)		# save N temporarily
+	and.l		&0x3F,%d1		# D0 is J = N mod 64
+	lsl.l		&4,%d1
+	add.l		%d1,%a1			# address of 2^(J/64)
+	mov.l		L_SCR1(%a6),%d1
+	asr.l		&6,%d1			# D0 is M
+	mov.l		%d1,L_SCR1(%a6)		# save a copy of M
+
+#--Step 3.
+#--fp2/fp3 saved on the stack. fp0 is N, fp1 is X,
+#--a1 points to 2^(J/64), d1 and L_SCR1 both contain M
+	fmov.x		%fp0,%fp2
+	fmul.s		&0xBC317218,%fp0	# N * L1, L1 = lead(-log2/64)
+	fmul.x		L2(%pc),%fp2		# N * L2, L1+L2 = -log2/64
+	fadd.x		%fp1,%fp0		# X + N*L1
+	fadd.x		%fp2,%fp0		# fp0 is R, reduced arg.
+	add.w		&0x3FFF,%d1		# D0 is biased expo. of 2^M
+
+#--Step 4.
+#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*(A5 + R*A6)))))
+#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+#--[R*S*(A2+S*(A4+S*A6))] + [R+S*(A1+S*(A3+S*A5))]
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# fp1 IS S = R*R
+
+	fmov.s		&0x3950097B,%fp2	# fp2 IS a6
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*A6
+	fmov.x		%fp1,%fp3
+	fmul.s		&0x3AB60B6A,%fp3	# fp3 IS S*A5
+
+	fadd.d		EM1A4(%pc),%fp2		# fp2 IS A4+S*A6
+	fadd.d		EM1A3(%pc),%fp3		# fp3 IS A3+S*A5
+	mov.w		%d1,SC(%a6)		# SC is 2^(M) in extended
+	mov.l		&0x80000000,SC+4(%a6)
+	clr.l		SC+8(%a6)
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A4+S*A6)
+	mov.l		L_SCR1(%a6),%d1		# D0 is	M
+	neg.w		%d1			# D0 is -M
+	fmul.x		%fp1,%fp3		# fp3 IS S*(A3+S*A5)
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(-M)
+	fadd.d		EM1A2(%pc),%fp2		# fp2 IS A2+S*(A4+S*A6)
+	fadd.s		&0x3F000000,%fp3	# fp3 IS A1+S*(A3+S*A5)
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A2+S*(A4+S*A6))
+	or.w		&0x8000,%d1		# signed/expo. of -2^(-M)
+	mov.w		%d1,ONEBYSC(%a6)	# OnebySc is -2^(-M)
+	mov.l		&0x80000000,ONEBYSC+4(%a6)
+	clr.l		ONEBYSC+8(%a6)
+	fmul.x		%fp3,%fp1		# fp1 IS S*(A1+S*(A3+S*A5))
+
+	fmul.x		%fp0,%fp2		# fp2 IS R*S*(A2+S*(A4+S*A6))
+	fadd.x		%fp1,%fp0		# fp0 IS R+S*(A1+S*(A3+S*A5))
+
+	fadd.x		%fp2,%fp0		# fp0 IS EXP(R)-1
+
+	fmovm.x		(%sp)+,&0x30		# fp2 restored {%fp2/%fp3}
+
+#--Step 5
+#--Compute 2^(J/64)*p
+
+	fmul.x		(%a1),%fp0		# 2^(J/64)*(Exp(R)-1)
+
+#--Step 6
+#--Step 6.1
+	mov.l		L_SCR1(%a6),%d1		# retrieve M
+	cmp.l		%d1,&63
+	ble.b		MLE63
+#--Step 6.2	M >= 64
+	fmov.s		12(%a1),%fp1		# fp1 is t
+	fadd.x		ONEBYSC(%a6),%fp1	# fp1 is t+OnebySc
+	fadd.x		%fp1,%fp0		# p+(t+OnebySc), fp1 released
+	fadd.x		(%a1),%fp0		# T+(p+(t+OnebySc))
+	bra		EM1SCALE
+MLE63:
+#--Step 6.3	M <= 63
+	cmp.l		%d1,&-3
+	bge.b		MGEN3
+MLTN3:
+#--Step 6.4	M <= -4
+	fadd.s		12(%a1),%fp0		# p+t
+	fadd.x		(%a1),%fp0		# T+(p+t)
+	fadd.x		ONEBYSC(%a6),%fp0	# OnebySc + (T+(p+t))
+	bra		EM1SCALE
+MGEN3:
+#--Step 6.5	-3 <= M <= 63
+	fmov.x		(%a1)+,%fp1		# fp1 is T
+	fadd.s		(%a1),%fp0		# fp0 is p+t
+	fadd.x		ONEBYSC(%a6),%fp1	# fp1 is T+OnebySc
+	fadd.x		%fp1,%fp0		# (T+OnebySc)+(p+t)
+
+EM1SCALE:
+#--Step 6.6
+	fmov.l		%d0,%fpcr
+	fmul.x		SC(%a6),%fp0
+	bra		t_inx2
+
+EM1SM:
+#--Step 7	|X| < 1/4.
+	cmp.l		%d1,&0x3FBE0000		# 2^(-65)
+	bge.b		EM1POLY
+
+EM1TINY:
+#--Step 8	|X| < 2^(-65)
+	cmp.l		%d1,&0x00330000		# 2^(-16312)
+	blt.b		EM12TINY
+#--Step 8.2
+	mov.l		&0x80010000,SC(%a6)	# SC is -2^(-16382)
+	mov.l		&0x80000000,SC+4(%a6)
+	clr.l		SC+8(%a6)
+	fmov.x		(%a0),%fp0
+	fmov.l		%d0,%fpcr
+	mov.b		&FADD_OP,%d1		# last inst is ADD
+	fadd.x		SC(%a6),%fp0
+	bra		t_catch
+
+EM12TINY:
+#--Step 8.3
+	fmov.x		(%a0),%fp0
+	fmul.d		TWO140(%pc),%fp0
+	mov.l		&0x80010000,SC(%a6)
+	mov.l		&0x80000000,SC+4(%a6)
+	clr.l		SC+8(%a6)
+	fadd.x		SC(%a6),%fp0
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.d		TWON140(%pc),%fp0
+	bra		t_catch
+
+EM1POLY:
+#--Step 9	exp(X)-1 by a simple polynomial
+	fmov.x		(%a0),%fp0		# fp0 is X
+	fmul.x		%fp0,%fp0		# fp0 is S := X*X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	fmov.s		&0x2F30CAA8,%fp1	# fp1 is B12
+	fmul.x		%fp0,%fp1		# fp1 is S*B12
+	fmov.s		&0x310F8290,%fp2	# fp2 is B11
+	fadd.s		&0x32D73220,%fp1	# fp1 is B10+S*B12
+
+	fmul.x		%fp0,%fp2		# fp2 is S*B11
+	fmul.x		%fp0,%fp1		# fp1 is S*(B10 + ...
+
+	fadd.s		&0x3493F281,%fp2	# fp2 is B9+S*...
+	fadd.d		EM1B8(%pc),%fp1		# fp1 is B8+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B9+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B8+...
+
+	fadd.d		EM1B7(%pc),%fp2		# fp2 is B7+S*...
+	fadd.d		EM1B6(%pc),%fp1		# fp1 is B6+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B7+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B6+...
+
+	fadd.d		EM1B5(%pc),%fp2		# fp2 is B5+S*...
+	fadd.d		EM1B4(%pc),%fp1		# fp1 is B4+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B5+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B4+...
+
+	fadd.d		EM1B3(%pc),%fp2		# fp2 is B3+S*...
+	fadd.x		EM1B2(%pc),%fp1		# fp1 is B2+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B3+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B2+...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*S*(B3+...)
+	fmul.x		(%a0),%fp1		# fp1 is X*S*(B2...
+
+	fmul.s		&0x3F000000,%fp0	# fp0 is S*B1
+	fadd.x		%fp2,%fp1		# fp1 is Q
+
+	fmovm.x		(%sp)+,&0x30		# fp2 restored {%fp2/%fp3}
+
+	fadd.x		%fp1,%fp0		# fp0 is S*B1+Q
+
+	fmov.l		%d0,%fpcr
+	fadd.x		(%a0),%fp0
+	bra		t_inx2
+
+EM1BIG:
+#--Step 10	|X| > 70 log2
+	mov.l		(%a0),%d1
+	cmp.l		%d1,&0
+	bgt.w		EXPC1
+#--Step 10.2
+	fmov.s		&0xBF800000,%fp0	# fp0 is -1
+	fmov.l		%d0,%fpcr
+	fadd.s		&0x00800000,%fp0	# -1 + 2^(-126)
+	bra		t_minx2
+
+	global		setoxm1d
+setoxm1d:
+#--entry point for EXPM1(X), here X is denormalized
+#--Step 0.
+	bra		t_extdnrm
+
+#########################################################################
+# sgetexp():  returns the exponent portion of the input argument.	#
+#	      The exponent bias is removed and the exponent value is	#
+#	      returned as an extended precision number in fp0.		#
+# sgetexpd(): handles denormalized numbers.				#
+#									#
+# sgetman():  extracts the mantissa of the input argument. The		#
+#	      mantissa is converted to an extended precision number w/	#
+#	      an exponent of $3fff and is returned in fp0. The range of #
+#	      the result is [1.0, 2.0).					#
+# sgetmand(): handles denormalized numbers.				#
+#									#
+# INPUT *************************************************************** #
+#	a0  = pointer to extended precision input			#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = exponent(X) or mantissa(X)				#
+#									#
+#########################################################################
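+
+# The semantics of sgetexp/sgetman can be modeled in C with frexp().
+# A sketch only (the names are illustrative); frexp() returns a
+# mantissa of magnitude [0.5, 1.0), so it is rescaled to match this
+# routine's [1.0, 2.0) convention. Inputs are assumed non-zero.
+#
+#	#include <math.h>
+#
+#	double getexp_sketch(double x)	/* unbiased exponent of x */
+#	{
+#		int e;
+#		frexp(x, &e);
+#		return (double)(e - 1);
+#	}
+#
+#	double getman_sketch(double x)	/* mantissa, magnitude in [1,2) */
+#	{
+#		int e;
+#		return 2.0 * frexp(x, &e);	/* sign is preserved */
+#	}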
+
+	global		sgetexp
+sgetexp:
+	mov.w		SRC_EX(%a0),%d0		# get the exponent
+	bclr		&0xf,%d0		# clear the sign bit
+	subi.w		&0x3fff,%d0		# subtract off the bias
+	fmov.w		%d0,%fp0		# return exp in fp0
+	blt.b		sgetexpn		# it's negative
+	rts
+
+sgetexpn:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+	global		sgetexpd
+sgetexpd:
+	bsr.l		norm			# normalize
+	neg.w		%d0			# new exp = -(shft amt)
+	subi.w		&0x3fff,%d0		# subtract off the bias
+	fmov.w		%d0,%fp0		# return exp in fp0
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+	global		sgetman
+sgetman:
+	mov.w		SRC_EX(%a0),%d0		# get the exp
+	ori.w		&0x7fff,%d0		# overwrite exp w/ all 1's
+	bclr		&0xe,%d0		# make it the new exp +-3fff
+
+# here, we build the result in a tmp location so as not to disturb the input
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6) # copy to tmp loc
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6) # copy to tmp loc
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmov.x		FP_SCR0(%a6),%fp0	# put new value back in fp0
+	bmi.b		sgetmann		# it's negative
+	rts
+
+sgetmann:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#
+# For denormalized numbers, shift the mantissa until the j-bit = 1,
+# then load the exponent with +/- $3fff.
+#
+	global		sgetmand
+sgetmand:
+	bsr.l		norm			# normalize exponent
+	bra.b		sgetman
+
+#########################################################################
+# scosh():  computes the hyperbolic cosine of a normalized input	#
+# scoshd(): computes the hyperbolic cosine of a denormalized input	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = cosh(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	COSH								#
+#	1. If |X| > 16380 log2, go to 3.				#
+#									#
+#	2. (|X| <= 16380 log2) Cosh(X) is obtained by the formulae	#
+#		y = |X|, z = exp(Y), and				#
+#		cosh(X) = (1/2)*( z + 1/z ).				#
+#		Exit.							#
+#									#
+#	3. (|X| > 16380 log2). If |X| > 16480 log2, go to 5.		#
+#									#
+#	4. (16380 log2 < |X| <= 16480 log2)				#
+#		cosh(X) = exp(|X|)/2.					#
+#		However, invoking exp(|X|) may cause premature		#
+#		overflow. Thus, we calculate cosh(X) as follows:	#
+#		Y	:= |X|						#
+#		Fact	:=	2**(16380)				#
+#		Y'	:= Y - 16381 log2				#
+#		cosh(X) := Fact * exp(Y').				#
+#		Exit.							#
+#									#
+#	5. (|X| > 16480 log2) cosh(X) must overflow. Return		#
+#		Huge*Huge to generate overflow and a positive		#
+#		infinity (cosh is always positive). Huge is the largest	#
+#		finite number in extended format. Exit.			#
+#									#
+#########################################################################
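+
+# The usual case (Step 2) in C, for illustration; exp() stands in for
+# setox, and the large-|X| rescaling of Step 4 is omitted. The halved
+# terms mirror the order of operations in the code below.
+#
+#	#include <math.h>
+#
+#	double cosh_sketch(double x)
+#	{
+#		double h = 0.5 * exp(fabs(x));	/* (1/2)exp(|X|) */
+#		return h + 0.25 / h;		/* = (1/2)(z + 1/z) */
+#	}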
+
+TWO16380:
+	long		0x7FFB0000,0x80000000,0x00000000,0x00000000
+
+	global		scosh
+scosh:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x400CB167
+	bgt.b		COSHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+#--COSH(X) = (1/2) * ( EXP(X) + 1/EXP(X) )
+
+	fabs.x		%fp0			# |X|
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save |X| to stack
+	lea		(%sp),%a0		# pass ptr to |X|
+	bsr		setox			# FP0 IS EXP(|X|)
+	add.l		&0xc,%sp		# erase |X| from stack
+	fmul.s		&0x3F000000,%fp0	# (1/2)EXP(|X|)
+	mov.l		(%sp)+,%d0
+
+	fmov.s		&0x3E800000,%fp1	# (1/4)
+	fdiv.x		%fp0,%fp1		# 1/(2 EXP(|X|))
+
+	fmov.l		%d0,%fpcr
+	mov.b		&FADD_OP,%d1		# last inst is ADD
+	fadd.x		%fp1,%fp0
+	bra		t_catch
+
+COSHBIG:
+	cmp.l		%d1,&0x400CB2B3
+	bgt.b		COSHHUGE
+
+	fabs.x		%fp0
+	fsub.d		T1(%pc),%fp0		# (|X|-16381LOG2_LEAD)
+	fsub.d		T2(%pc),%fp0		# |X| - 16381 LOG2, ACCURATE
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save fp0 to stack
+	lea		(%sp),%a0		# pass ptr to fp0
+	bsr		setox
+	add.l		&0xc,%sp		# clear fp0 from stack
+	mov.l		(%sp)+,%d0
+
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		TWO16380(%pc),%fp0
+	bra		t_catch
+
+COSHHUGE:
+	bra		t_ovfl2
+
+	global		scoshd
+#--COSH(X) = 1 FOR DENORMALIZED X
+scoshd:
+	fmov.s		&0x3F800000,%fp0
+
+	fmov.l		%d0,%fpcr
+	fadd.s		&0x00800000,%fp0
+	bra		t_pinx2
+
+#########################################################################
+# ssinh():  computes the hyperbolic sine of a normalized input		#
+# ssinhd(): computes the hyperbolic sine of a denormalized input	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = sinh(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#       SINH								#
+#       1. If |X| > 16380 log2, go to 3.				#
+#									#
+#       2. (|X| <= 16380 log2) Sinh(X) is obtained by the formula	#
+#               y = |X|, sgn = sign(X), and z = expm1(Y),		#
+#               sinh(X) = sgn*(1/2)*( z + z/(1+z) ).			#
+#          Exit.							#
+#									#
+#       3. If |X| > 16480 log2, go to 5.				#
+#									#
+#       4. (16380 log2 < |X| <= 16480 log2)				#
+#               sinh(X) = sign(X) * exp(|X|)/2.				#
+#          However, invoking exp(|X|) may cause premature overflow.	#
+#          Thus, we calculate sinh(X) as follows:			#
+#             Y       := |X|						#
+#             sgn     := sign(X)					#
+#             sgnFact := sgn * 2**(16380)				#
+#             Y'      := Y - 16381 log2					#
+#             sinh(X) := sgnFact * exp(Y').				#
+#          Exit.							#
+#									#
+#       5. (|X| > 16480 log2) sinh(X) must overflow. Return		#
+#          sign(X)*Huge*Huge to generate overflow and an infinity with	#
+#          the appropriate sign. Huge is the largest finite number in	#
+#          extended format. Exit.					#
+#									#
+#########################################################################
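+
+# Step 2 in C, for illustration; expm1() stands in for setoxm1. The
+# z/(1+z) term recovers exp(-Y) without a second exponential call.
+#
+#	#include <math.h>
+#
+#	double sinh_sketch(double x)
+#	{
+#		double z = expm1(fabs(x));
+#		double r = 0.5 * (z + z / (1.0 + z));
+#		return copysign(r, x);
+#	}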
+
+	global		ssinh
+ssinh:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	mov.l		%d1,%a1			# save (compacted) operand
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x400CB167
+	bgt.b		SINHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+#--Y = |X|, Z = EXPM1(Y), SINH(X) = SIGN(X)*(1/2)*( Z + Z/(1+Z) )
+
+	fabs.x		%fp0			# Y = |X|
+
+	movm.l		&0x8040,-(%sp)		# {a1/d0}
+	fmovm.x		&0x01,-(%sp)		# save Y on stack
+	lea		(%sp),%a0		# pass ptr to Y
+	clr.l		%d0
+	bsr		setoxm1			# FP0 IS Z = EXPM1(Y)
+	add.l		&0xc,%sp		# clear Y from stack
+	fmov.l		&0,%fpcr
+	movm.l		(%sp)+,&0x0201		# {a1/d0}
+
+	fmov.x		%fp0,%fp1
+	fadd.s		&0x3F800000,%fp1	# 1+Z
+	fmov.x		%fp0,-(%sp)
+	fdiv.x		%fp1,%fp0		# Z/(1+Z)
+	mov.l		%a1,%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x3F000000,%d1
+	fadd.x		(%sp)+,%fp0
+	mov.l		%d1,-(%sp)
+
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.s		(%sp)+,%fp0		# last fp inst - possible exceptions set
+	bra		t_catch
+
+SINHBIG:
+	cmp.l		%d1,&0x400CB2B3
+	bgt		t_ovfl
+	fabs.x		%fp0
+	fsub.d		T1(%pc),%fp0		# (|X|-16381LOG2_LEAD)
+	mov.l		&0,-(%sp)
+	mov.l		&0x80000000,-(%sp)
+	mov.l		%a1,%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x7FFB0000,%d1
+	mov.l		%d1,-(%sp)		# EXTENDED FMT
+	fsub.d		T2(%pc),%fp0		# |X| - 16381 LOG2, ACCURATE
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save fp0 on stack
+	lea		(%sp),%a0		# pass ptr to fp0
+	bsr		setox
+	add.l		&0xc,%sp		# clear fp0 from stack
+
+	mov.l		(%sp)+,%d0
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		(%sp)+,%fp0		# possible exception
+	bra		t_catch
+
+	global		ssinhd
+#--SINH(X) = X FOR DENORMALIZED X
+ssinhd:
+	bra		t_extdnrm
+
+#########################################################################
+# stanh():  computes the hyperbolic tangent of a normalized input	#
+# stanhd(): computes the hyperbolic tangent of a denormalized input	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = tanh(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	TANH								#
+#	1. If |X| >= (5/2) log2 or |X| <= 2**(-40), go to 3.		#
+#									#
+#	2. (2**(-40) < |X| < (5/2) log2) Calculate tanh(X) by		#
+#		sgn := sign(X), y := 2|X|, z := expm1(Y), and		#
+#		tanh(X) = sgn*( z/(2+z) ).				#
+#		Exit.							#
+#									#
+#	3. (|X| <= 2**(-40) or |X| >= (5/2) log2). If |X| < 1,		#
+#		go to 7.						#
+#									#
+#	4. (|X| >= (5/2) log2) If |X| >= 50 log2, go to 6.		#
+#									#
+#	5. ((5/2) log2 <= |X| < 50 log2) Calculate tanh(X) by		#
+#		sgn := sign(X), y := 2|X|, z := exp(Y),			#
+#		tanh(X) = sgn - [ sgn*2/(1+z) ].			#
+#		Exit.							#
+#									#
+#	6. (|X| >= 50 log2) Tanh(X) = +-1 (round to nearest). Thus, we	#
+#		calculate Tanh(X) by					#
+#		sgn := sign(X), Tiny := 2**(-126),			#
+#		tanh(X) := sgn - sgn*Tiny.				#
+#		Exit.							#
+#									#
+#	7. (|X| < 2**(-40)). Tanh(X) = X.	Exit.			#
+#									#
+#########################################################################
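+
+# Step 2 in C, for illustration; expm1() stands in for setoxm1:
+#
+#	#include <math.h>
+#
+#	double tanh_sketch(double x)
+#	{
+#		double z = expm1(2.0 * fabs(x));   /* z = exp(2|X|) - 1 */
+#		return copysign(z / (2.0 + z), x); /* tanh(|X|) = z/(z+2) */
+#	}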
+
+	set		X,FP_SCR0
+	set		XFRAC,X+4
+
+	set		SGN,L_SCR3
+
+	set		V,FP_SCR0
+
+	global		stanh
+stanh:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	fmov.x		%fp0,X(%a6)
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	mov.l		%d1,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1, &0x3fd78000	# is |X| < 2^(-40)?
+	blt.w		TANHBORS		# yes
+	cmp.l		%d1, &0x3fffddce	# is |X| > (5/2)LOG2?
+	bgt.w		TANHBORS		# yes
+
+#--THIS IS THE USUAL CASE
+#--Y = 2|X|, Z = EXPM1(Y), TANH(X) = SIGN(X) * Z / (Z+2).
+
+	mov.l		X(%a6),%d1
+	mov.l		%d1,SGN(%a6)
+	and.l		&0x7FFF0000,%d1
+	add.l		&0x00010000,%d1		# EXPONENT OF 2|X|
+	mov.l		%d1,X(%a6)
+	and.l		&0x80000000,SGN(%a6)
+	fmov.x		X(%a6),%fp0		# FP0 IS Y = 2|X|
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x1,-(%sp)		# save Y on stack
+	lea		(%sp),%a0		# pass ptr to Y
+	bsr		setoxm1			# FP0 IS Z = EXPM1(Y)
+	add.l		&0xc,%sp		# clear Y from stack
+	mov.l		(%sp)+,%d0
+
+	fmov.x		%fp0,%fp1
+	fadd.s		&0x40000000,%fp1	# Z+2
+	mov.l		SGN(%a6),%d1
+	fmov.x		%fp1,V(%a6)
+	eor.l		%d1,V(%a6)
+
+	fmov.l		%d0,%fpcr		# restore user's round prec,mode
+	fdiv.x		V(%a6),%fp0
+	bra		t_inx2
+
+TANHBORS:
+	cmp.l		%d1,&0x3FFF8000
+	blt.w		TANHSM
+
+	cmp.l		%d1,&0x40048AA1
+	bgt.w		TANHHUGE
+
+#-- (5/2) LOG2 < |X| < 50 LOG2,
+#--TANH(X) = 1 - (2/[EXP(2X)+1]). LET Y = 2|X|, SGN = SIGN(X),
+#--TANH(X) = SGN -	SGN*2/[EXP(Y)+1].
+
+	mov.l		X(%a6),%d1
+	mov.l		%d1,SGN(%a6)
+	and.l		&0x7FFF0000,%d1
+	add.l		&0x00010000,%d1		# EXPO OF 2|X|
+	mov.l		%d1,X(%a6)		# Y = 2|X|
+	and.l		&0x80000000,SGN(%a6)
+	mov.l		SGN(%a6),%d1
+	fmov.x		X(%a6),%fp0		# Y = 2|X|
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save Y on stack
+	lea		(%sp),%a0		# pass ptr to Y
+	bsr		setox			# FP0 IS EXP(Y)
+	add.l		&0xc,%sp		# clear Y from stack
+	mov.l		(%sp)+,%d0
+	mov.l		SGN(%a6),%d1
+	fadd.s		&0x3F800000,%fp0	# EXP(Y)+1
+
+	eor.l		&0xC0000000,%d1		# -SIGN(X)*2
+	fmov.s		%d1,%fp1		# -SIGN(X)*2 IN SGL FMT
+	fdiv.x		%fp0,%fp1		# -SIGN(X)2 / [EXP(Y)+1 ]
+
+	mov.l		SGN(%a6),%d1
+	or.l		&0x3F800000,%d1		# SGN
+	fmov.s		%d1,%fp0		# SGN IN SGL FMT
+
+	fmov.l		%d0,%fpcr		# restore user's round prec,mode
+	mov.b		&FADD_OP,%d1		# last inst is ADD
+	fadd.x		%fp1,%fp0
+	bra		t_inx2
+
+TANHSM:
+	fmov.l		%d0,%fpcr		# restore user's round prec,mode
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0		# last inst - possible exception set
+	bra		t_catch
+
+#--RETURN SGN(X) - SGN(X)*EPS
+TANHHUGE:
+	mov.l		X(%a6),%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x3F800000,%d1
+	fmov.s		%d1,%fp0
+	and.l		&0x80000000,%d1
+	eor.l		&0x80800000,%d1		# -SIGN(X)*EPS
+
+	fmov.l		%d0,%fpcr		# restore user's round prec,mode
+	fadd.s		%d1,%fp0
+	bra		t_inx2
+
+	global		stanhd
+#--TANH(X) = X FOR DENORMALIZED X
+stanhd:
+	bra		t_extdnrm
+
+#########################################################################
+# slogn():    computes the natural logarithm of a normalized input	#
+# slognd():   computes the natural logarithm of a denormalized input	#
+# slognp1():  computes the log(1+X) of a normalized input		#
+# slognp1d(): computes the log(1+X) of a denormalized input		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = log(X) or log(1+X)					#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 2 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	LOGN:								#
+#	Step 1. If |X-1| < 1/16, approximate log(X) by an odd		#
+#		polynomial in u, where u = 2(X-1)/(X+1). Otherwise,	#
+#		move on to Step 2.					#
+#									#
+#	Step 2. X = 2**k * Y where 1 <= Y < 2. Define F to be the first	#
+#		seven significant bits of Y plus 2**(-7), i.e.		#
+#		F = 1.xxxxxx1 in base 2 where the six "x" match those	#
+#		of Y. Note that |Y-F| <= 2**(-7).			#
+#									#
+#	Step 3. Define u = (Y-F)/F. Approximate log(1+u) by a		#
+#		polynomial in u, log(1+u) = poly.			#
+#									#
+#	Step 4. Reconstruct						#
+#		log(X) = log( 2**k * Y ) = k*log(2) + log(F) + log(1+u)	#
+#		by k*log(2) + (log(F) + poly). The values of log(F) are	#
+#		calculated beforehand and stored in the program.	#
+#									#
+#	lognp1:								#
+#	Step 1: If |X| < 1/16, approximate log(1+X) by an odd		#
+#		polynomial in u where u = 2X/(2+X). Otherwise, move on	#
+#		to Step 2.						#
+#									#
+#	Step 2: Let 1+X = 2**k * Y, where 1 <= Y < 2. Define F as done	#
+#		in Step 2 of the algorithm for LOGN and compute		#
+#		log(1+X) as k*log(2) + log(F) + poly where poly		#
+#		approximates log(1+u), u = (Y-F)/F.			#
+#									#
+#	Implementation Notes:						#
+#	Note 1. There are 64 different possible values for F, thus 64	#
+#		log(F)'s need to be tabulated. Moreover, the values of	#
+#		1/F are also tabulated so that the division in (Y-F)/F	#
+#		can be performed by a multiplication.			#
+#									#
+#	Note 2. In Step 2 of lognp1, in order to preserve accuracy,	#
+#		the value Y-F has to be calculated carefully when	#
+#		1/2 <= X < 3/2.						#
+#									#
+#	Note 3. To fully exploit the pipeline, polynomials are usually	#
+#		separated into two parts evaluated independently before	#
+#		being added up.						#
+#									#
+#########################################################################
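+# Steps 2-4 of LOGN in C, for illustration. A sketch only: frexp(),
+# log() and log1p() from libm stand in for the normalization, the
+# tabulated log(F) values, and the polynomial approximation of
+# log(1+u).
+#
+#	#include <math.h>
+#
+#	double log_sketch(double x)	/* x > 0, not very close to 1 */
+#	{
+#		int    k;
+#		double y = 2.0 * frexp(x, &k);	/* x = 2^(k-1)*y, 1<=y<2 */
+#		/* F = first 7 significant bits of y, plus 2^-7 */
+#		double f = (floor(y * 64.0) + 0.5) / 64.0;
+#		double u = (y - f) / f;		/* |y - f| <= 2^-7 */
+#		return (k - 1) * M_LN2 + log(f) + log1p(u);
+#	}
+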
+LOGOF2:
+	long		0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+one:
+	long		0x3F800000
+zero:
+	long		0x00000000
+infty:
+	long		0x7F800000
+negone:
+	long		0xBF800000
+
+LOGA6:
+	long		0x3FC2499A,0xB5E4040B
+LOGA5:
+	long		0xBFC555B5,0x848CB7DB
+
+LOGA4:
+	long		0x3FC99999,0x987D8730
+LOGA3:
+	long		0xBFCFFFFF,0xFF6F7E97
+
+LOGA2:
+	long		0x3FD55555,0x555555A4
+LOGA1:
+	long		0xBFE00000,0x00000008
+
+LOGB5:
+	long		0x3F175496,0xADD7DAD6
+LOGB4:
+	long		0x3F3C71C2,0xFE80C7E0
+
+LOGB3:
+	long		0x3F624924,0x928BCCFF
+LOGB2:
+	long		0x3F899999,0x999995EC
+
+LOGB1:
+	long		0x3FB55555,0x55555555
+TWO:
+	long		0x40000000,0x00000000
+
+LTHOLD:
+	long		0x3f990000,0x80000000,0x00000000,0x00000000
+
+LOGTBL:
+	long		0x3FFE0000,0xFE03F80F,0xE03F80FE,0x00000000
+	long		0x3FF70000,0xFF015358,0x833C47E2,0x00000000
+	long		0x3FFE0000,0xFA232CF2,0x52138AC0,0x00000000
+	long		0x3FF90000,0xBDC8D83E,0xAD88D549,0x00000000
+	long		0x3FFE0000,0xF6603D98,0x0F6603DA,0x00000000
+	long		0x3FFA0000,0x9CF43DCF,0xF5EAFD48,0x00000000
+	long		0x3FFE0000,0xF2B9D648,0x0F2B9D65,0x00000000
+	long		0x3FFA0000,0xDA16EB88,0xCB8DF614,0x00000000
+	long		0x3FFE0000,0xEF2EB71F,0xC4345238,0x00000000
+	long		0x3FFB0000,0x8B29B775,0x1BD70743,0x00000000
+	long		0x3FFE0000,0xEBBDB2A5,0xC1619C8C,0x00000000
+	long		0x3FFB0000,0xA8D839F8,0x30C1FB49,0x00000000
+	long		0x3FFE0000,0xE865AC7B,0x7603A197,0x00000000
+	long		0x3FFB0000,0xC61A2EB1,0x8CD907AD,0x00000000
+	long		0x3FFE0000,0xE525982A,0xF70C880E,0x00000000
+	long		0x3FFB0000,0xE2F2A47A,0xDE3A18AF,0x00000000
+	long		0x3FFE0000,0xE1FC780E,0x1FC780E2,0x00000000
+	long		0x3FFB0000,0xFF64898E,0xDF55D551,0x00000000
+	long		0x3FFE0000,0xDEE95C4C,0xA037BA57,0x00000000
+	long		0x3FFC0000,0x8DB956A9,0x7B3D0148,0x00000000
+	long		0x3FFE0000,0xDBEB61EE,0xD19C5958,0x00000000
+	long		0x3FFC0000,0x9B8FE100,0xF47BA1DE,0x00000000
+	long		0x3FFE0000,0xD901B203,0x6406C80E,0x00000000
+	long		0x3FFC0000,0xA9372F1D,0x0DA1BD17,0x00000000
+	long		0x3FFE0000,0xD62B80D6,0x2B80D62C,0x00000000
+	long		0x3FFC0000,0xB6B07F38,0xCE90E46B,0x00000000
+	long		0x3FFE0000,0xD3680D36,0x80D3680D,0x00000000
+	long		0x3FFC0000,0xC3FD0329,0x06488481,0x00000000
+	long		0x3FFE0000,0xD0B69FCB,0xD2580D0B,0x00000000
+	long		0x3FFC0000,0xD11DE0FF,0x15AB18CA,0x00000000
+	long		0x3FFE0000,0xCE168A77,0x25080CE1,0x00000000
+	long		0x3FFC0000,0xDE1433A1,0x6C66B150,0x00000000
+	long		0x3FFE0000,0xCB8727C0,0x65C393E0,0x00000000
+	long		0x3FFC0000,0xEAE10B5A,0x7DDC8ADD,0x00000000
+	long		0x3FFE0000,0xC907DA4E,0x871146AD,0x00000000
+	long		0x3FFC0000,0xF7856E5E,0xE2C9B291,0x00000000
+	long		0x3FFE0000,0xC6980C69,0x80C6980C,0x00000000
+	long		0x3FFD0000,0x82012CA5,0xA68206D7,0x00000000
+	long		0x3FFE0000,0xC4372F85,0x5D824CA6,0x00000000
+	long		0x3FFD0000,0x882C5FCD,0x7256A8C5,0x00000000
+	long		0x3FFE0000,0xC1E4BBD5,0x95F6E947,0x00000000
+	long		0x3FFD0000,0x8E44C60B,0x4CCFD7DE,0x00000000
+	long		0x3FFE0000,0xBFA02FE8,0x0BFA02FF,0x00000000
+	long		0x3FFD0000,0x944AD09E,0xF4351AF6,0x00000000
+	long		0x3FFE0000,0xBD691047,0x07661AA3,0x00000000
+	long		0x3FFD0000,0x9A3EECD4,0xC3EAA6B2,0x00000000
+	long		0x3FFE0000,0xBB3EE721,0xA54D880C,0x00000000
+	long		0x3FFD0000,0xA0218434,0x353F1DE8,0x00000000
+	long		0x3FFE0000,0xB92143FA,0x36F5E02E,0x00000000
+	long		0x3FFD0000,0xA5F2FCAB,0xBBC506DA,0x00000000
+	long		0x3FFE0000,0xB70FBB5A,0x19BE3659,0x00000000
+	long		0x3FFD0000,0xABB3B8BA,0x2AD362A5,0x00000000
+	long		0x3FFE0000,0xB509E68A,0x9B94821F,0x00000000
+	long		0x3FFD0000,0xB1641795,0xCE3CA97B,0x00000000
+	long		0x3FFE0000,0xB30F6352,0x8917C80B,0x00000000
+	long		0x3FFD0000,0xB7047551,0x5D0F1C61,0x00000000
+	long		0x3FFE0000,0xB11FD3B8,0x0B11FD3C,0x00000000
+	long		0x3FFD0000,0xBC952AFE,0xEA3D13E1,0x00000000
+	long		0x3FFE0000,0xAF3ADDC6,0x80AF3ADE,0x00000000
+	long		0x3FFD0000,0xC2168ED0,0xF458BA4A,0x00000000
+	long		0x3FFE0000,0xAD602B58,0x0AD602B6,0x00000000
+	long		0x3FFD0000,0xC788F439,0xB3163BF1,0x00000000
+	long		0x3FFE0000,0xAB8F69E2,0x8359CD11,0x00000000
+	long		0x3FFD0000,0xCCECAC08,0xBF04565D,0x00000000
+	long		0x3FFE0000,0xA9C84A47,0xA07F5638,0x00000000
+	long		0x3FFD0000,0xD2420487,0x2DD85160,0x00000000
+	long		0x3FFE0000,0xA80A80A8,0x0A80A80B,0x00000000
+	long		0x3FFD0000,0xD7894992,0x3BC3588A,0x00000000
+	long		0x3FFE0000,0xA655C439,0x2D7B73A8,0x00000000
+	long		0x3FFD0000,0xDCC2C4B4,0x9887DACC,0x00000000
+	long		0x3FFE0000,0xA4A9CF1D,0x96833751,0x00000000
+	long		0x3FFD0000,0xE1EEBD3E,0x6D6A6B9E,0x00000000
+	long		0x3FFE0000,0xA3065E3F,0xAE7CD0E0,0x00000000
+	long		0x3FFD0000,0xE70D785C,0x2F9F5BDC,0x00000000
+	long		0x3FFE0000,0xA16B312E,0xA8FC377D,0x00000000
+	long		0x3FFD0000,0xEC1F392C,0x5179F283,0x00000000
+	long		0x3FFE0000,0x9FD809FD,0x809FD80A,0x00000000
+	long		0x3FFD0000,0xF12440D3,0xE36130E6,0x00000000
+	long		0x3FFE0000,0x9E4CAD23,0xDD5F3A20,0x00000000
+	long		0x3FFD0000,0xF61CCE92,0x346600BB,0x00000000
+	long		0x3FFE0000,0x9CC8E160,0xC3FB19B9,0x00000000
+	long		0x3FFD0000,0xFB091FD3,0x8145630A,0x00000000
+	long		0x3FFE0000,0x9B4C6F9E,0xF03A3CAA,0x00000000
+	long		0x3FFD0000,0xFFE97042,0xBFA4C2AD,0x00000000
+	long		0x3FFE0000,0x99D722DA,0xBDE58F06,0x00000000
+	long		0x3FFE0000,0x825EFCED,0x49369330,0x00000000
+	long		0x3FFE0000,0x9868C809,0x868C8098,0x00000000
+	long		0x3FFE0000,0x84C37A7A,0xB9A905C9,0x00000000
+	long		0x3FFE0000,0x97012E02,0x5C04B809,0x00000000
+	long		0x3FFE0000,0x87224C2E,0x8E645FB7,0x00000000
+	long		0x3FFE0000,0x95A02568,0x095A0257,0x00000000
+	long		0x3FFE0000,0x897B8CAC,0x9F7DE298,0x00000000
+	long		0x3FFE0000,0x94458094,0x45809446,0x00000000
+	long		0x3FFE0000,0x8BCF55DE,0xC4CD05FE,0x00000000
+	long		0x3FFE0000,0x92F11384,0x0497889C,0x00000000
+	long		0x3FFE0000,0x8E1DC0FB,0x89E125E5,0x00000000
+	long		0x3FFE0000,0x91A2B3C4,0xD5E6F809,0x00000000
+	long		0x3FFE0000,0x9066E68C,0x955B6C9B,0x00000000
+	long		0x3FFE0000,0x905A3863,0x3E06C43B,0x00000000
+	long		0x3FFE0000,0x92AADE74,0xC7BE59E0,0x00000000
+	long		0x3FFE0000,0x8F1779D9,0xFDC3A219,0x00000000
+	long		0x3FFE0000,0x94E9BFF6,0x15845643,0x00000000
+	long		0x3FFE0000,0x8DDA5202,0x37694809,0x00000000
+	long		0x3FFE0000,0x9723A1B7,0x20134203,0x00000000
+	long		0x3FFE0000,0x8CA29C04,0x6514E023,0x00000000
+	long		0x3FFE0000,0x995899C8,0x90EB8990,0x00000000
+	long		0x3FFE0000,0x8B70344A,0x139BC75A,0x00000000
+	long		0x3FFE0000,0x9B88BDAA,0x3A3DAE2F,0x00000000
+	long		0x3FFE0000,0x8A42F870,0x5669DB46,0x00000000
+	long		0x3FFE0000,0x9DB4224F,0xFFE1157C,0x00000000
+	long		0x3FFE0000,0x891AC73A,0xE9819B50,0x00000000
+	long		0x3FFE0000,0x9FDADC26,0x8B7A12DA,0x00000000
+	long		0x3FFE0000,0x87F78087,0xF78087F8,0x00000000
+	long		0x3FFE0000,0xA1FCFF17,0xCE733BD4,0x00000000
+	long		0x3FFE0000,0x86D90544,0x7A34ACC6,0x00000000
+	long		0x3FFE0000,0xA41A9E8F,0x5446FB9F,0x00000000
+	long		0x3FFE0000,0x85BF3761,0x2CEE3C9B,0x00000000
+	long		0x3FFE0000,0xA633CD7E,0x6771CD8B,0x00000000
+	long		0x3FFE0000,0x84A9F9C8,0x084A9F9D,0x00000000
+	long		0x3FFE0000,0xA8489E60,0x0B435A5E,0x00000000
+	long		0x3FFE0000,0x83993052,0x3FBE3368,0x00000000
+	long		0x3FFE0000,0xAA59233C,0xCCA4BD49,0x00000000
+	long		0x3FFE0000,0x828CBFBE,0xB9A020A3,0x00000000
+	long		0x3FFE0000,0xAC656DAE,0x6BCC4985,0x00000000
+	long		0x3FFE0000,0x81848DA8,0xFAF0D277,0x00000000
+	long		0x3FFE0000,0xAE6D8EE3,0x60BB2468,0x00000000
+	long		0x3FFE0000,0x80808080,0x80808081,0x00000000
+	long		0x3FFE0000,0xB07197A2,0x3C46C654,0x00000000
+
+	set		ADJK,L_SCR1
+
+	set		X,FP_SCR0
+	set		XDCARE,X+2
+	set		XFRAC,X+4
+
+	set		F,FP_SCR1
+	set		FFRAC,F+4
+
+	set		KLOG2,FP_SCR0
+
+	set		SAVEU,FP_SCR0
+
+	global		slogn
+#--ENTRY POINT FOR LOG(X) FOR X FINITE, NON-ZERO, NOT A NAN
+slogn:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	mov.l		&0x00000000,ADJK(%a6)
+
+LOGBGN:
+#--FPCR SAVED AND CLEARED, INPUT IS 2^(ADJK)*FP0, FP0 CONTAINS
+#--A FINITE, NON-ZERO, NORMALIZED NUMBER.
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+
+	mov.l		(%a0),X(%a6)
+	mov.l		4(%a0),X+4(%a6)
+	mov.l		8(%a0),X+8(%a6)
+
+	cmp.l		%d1,&0			# CHECK IF X IS NEGATIVE
+	blt.w		LOGNEG			# LOG OF NEGATIVE ARGUMENT IS INVALID
+# X IS POSITIVE, CHECK IF X IS NEAR 1
+	cmp.l		%d1,&0x3ffef07d		# IS X < 15/16?
+	blt.b		LOGMAIN			# YES
+	cmp.l		%d1,&0x3fff8841		# IS X > 17/16?
+	ble.w		LOGNEAR1		# NO
+
+LOGMAIN:
+#--THIS SHOULD BE THE USUAL CASE, X NOT VERY CLOSE TO 1
+
+#--X = 2^(K) * Y, 1 <= Y < 2. THUS, Y = 1.XXXXXXXX....XX IN BINARY.
+#--WE DEFINE F = 1.XXXXXX1, I.E. FIRST 7 BITS OF Y AND ATTACH A 1.
+#--THE IDEA IS THAT LOG(X) = K*LOG2 + LOG(Y)
+#--			 = K*LOG2 + LOG(F) + LOG(1 + (Y-F)/F).
+#--NOTE THAT U = (Y-F)/F IS VERY SMALL AND THUS APPROXIMATING
+#--LOG(1+U) CAN BE VERY EFFICIENT.
+#--ALSO NOTE THAT THE VALUE 1/F IS STORED IN A TABLE SO THAT NO
+#--DIVISION IS NEEDED TO CALCULATE (Y-F)/F.
+
+#--GET K, Y, F, AND ADDRESS OF 1/F.
+	asr.l		&8,%d1
+	asr.l		&8,%d1			# SHIFTED 16 BITS, BIASED EXPO. OF X
+	sub.l		&0x3FFF,%d1		# THIS IS K
+	add.l		ADJK(%a6),%d1		# ADJUST K, ORIGINAL INPUT MAY BE DENORM.
+	lea		LOGTBL(%pc),%a0		# BASE ADDRESS OF 1/F AND LOG(F)
+	fmov.l		%d1,%fp1		# CONVERT K TO FLOATING-POINT FORMAT
+
+#--WHILE THE CONVERSION IS GOING ON, WE GET F AND ADDRESS OF 1/F
+	mov.l		&0x3FFF0000,X(%a6)	# X IS NOW Y, I.E. 2^(-K)*X
+	mov.l		XFRAC(%a6),FFRAC(%a6)
+	and.l		&0xFE000000,FFRAC(%a6)	# FIRST 7 BITS OF Y
+	or.l		&0x01000000,FFRAC(%a6)	# GET F: ATTACH A 1 AT THE EIGHTH BIT
+	mov.l		FFRAC(%a6),%d1	# READY TO GET ADDRESS OF 1/F
+	and.l		&0x7E000000,%d1
+	asr.l		&8,%d1
+	asr.l		&8,%d1
+	asr.l		&4,%d1			# SHIFTED 20, D0 IS THE DISPLACEMENT
+	add.l		%d1,%a0			# A0 IS THE ADDRESS FOR 1/F
+
+	fmov.x		X(%a6),%fp0
+	mov.l		&0x3fff0000,F(%a6)
+	clr.l		F+8(%a6)
+	fsub.x		F(%a6),%fp0		# Y-F
+	fmovm.x		&0xc,-(%sp)		# SAVE FP2-3 WHILE FP0 IS NOT READY
+#--SUMMARY: FP0 IS Y-F, A0 IS ADDRESS OF 1/F, FP1 IS K
+#--REGISTERS SAVED: FPCR, FP1, FP2
+
+LP1CONT1:
+#--A RE-ENTRY POINT FOR LOGNP1
+	fmul.x		(%a0),%fp0		# FP0 IS U = (Y-F)/F
+	fmul.x		LOGOF2(%pc),%fp1	# GET K*LOG2 WHILE FP0 IS NOT READY
+	fmov.x		%fp0,%fp2
+	fmul.x		%fp2,%fp2		# FP2 IS V=U*U
+	fmov.x		%fp1,KLOG2(%a6)		# PUT K*LOG2 IN MEMORY, FREE FP1
+
+#--LOG(1+U) IS APPROXIMATED BY
+#--U + V*(A1+U*(A2+U*(A3+U*(A4+U*(A5+U*A6))))) WHICH IS
+#--[U + V*(A1+V*(A3+V*A5))]  +  [U*V*(A2+V*(A4+V*A6))]
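+
+# written out in C, the regrouping above is simply (sketch only; A1-A6
+# stand for the LOGA1-LOGA6 constants):
+#
+#	double v = u*u;
+#	double odd  = u + v*(A1 + v*(A3 + v*A5));	/* one chain   */
+#	double even = u*v*(A2 + v*(A4 + v*A6));		/* other chain */
+#	double log1p_u = odd + even;	/* two short chains instead of */
+#					/* one long dependency chain   */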
+
+	fmov.x		%fp2,%fp3
+	fmov.x		%fp2,%fp1
+
+	fmul.d		LOGA6(%pc),%fp1		# V*A6
+	fmul.d		LOGA5(%pc),%fp2		# V*A5
+
+	fadd.d		LOGA4(%pc),%fp1		# A4+V*A6
+	fadd.d		LOGA3(%pc),%fp2		# A3+V*A5
+
+	fmul.x		%fp3,%fp1		# V*(A4+V*A6)
+	fmul.x		%fp3,%fp2		# V*(A3+V*A5)
+
+	fadd.d		LOGA2(%pc),%fp1		# A2+V*(A4+V*A6)
+	fadd.d		LOGA1(%pc),%fp2		# A1+V*(A3+V*A5)
+
+	fmul.x		%fp3,%fp1		# V*(A2+V*(A4+V*A6))
+	add.l		&16,%a0			# ADDRESS OF LOG(F)
+	fmul.x		%fp3,%fp2		# V*(A1+V*(A3+V*A5))
+
+	fmul.x		%fp0,%fp1		# U*V*(A2+V*(A4+V*A6))
+	fadd.x		%fp2,%fp0		# U+V*(A1+V*(A3+V*A5))
+
+	fadd.x		(%a0),%fp1		# LOG(F)+U*V*(A2+V*(A4+V*A6))
+	fmovm.x		(%sp)+,&0x30		# RESTORE FP2-3
+	fadd.x		%fp1,%fp0		# FP0 IS LOG(F) + LOG(1+U)
+
+	fmov.l		%d0,%fpcr
+	fadd.x		KLOG2(%a6),%fp0		# FINAL ADD
+	bra		t_inx2
+
+
+LOGNEAR1:
+
+# if the input is exactly equal to one, then exit through ld_pzero.
+# if these 2 lines weren't here, the correct answer would be returned
+# but the INEX2 bit would be set.
+	fcmp.b		%fp0,&0x1		# is it equal to one?
+	fbeq.l		ld_pzero		# yes
+
+#--REGISTERS SAVED: FPCR, FP1. FP0 CONTAINS THE INPUT.
+	fmov.x		%fp0,%fp1
+	fsub.s		one(%pc),%fp1		# FP1 IS X-1
+	fadd.s		one(%pc),%fp0		# FP0 IS X+1
+	fadd.x		%fp1,%fp1		# FP1 IS 2(X-1)
+#--LOG(X) = LOG(1+U/2)-LOG(1-U/2) WHICH IS AN ODD POLYNOMIAL
+#--IN U, U = 2(X-1)/(X+1) = FP1/FP0
+
+LP1CONT2:
+#--THIS IS A RE-ENTRY POINT FOR LOGNP1
+	fdiv.x		%fp0,%fp1		# FP1 IS U
+	fmovm.x		&0xc,-(%sp)		# SAVE FP2-3
+#--REGISTERS SAVED ARE NOW FPCR,FP1,FP2,FP3
+#--LET V=U*U, W=V*V, CALCULATE
+#--U + U*V*(B1 + V*(B2 + V*(B3 + V*(B4 + V*B5)))) BY
+#--U + U*V*(  [B1 + W*(B3 + W*B5)]  +  [V*(B2 + W*B4)]  )
+	fmov.x		%fp1,%fp0
+	fmul.x		%fp0,%fp0		# FP0 IS V
+	fmov.x		%fp1,SAVEU(%a6)		# STORE U IN MEMORY, FREE FP1
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS W
+
+	fmov.d		LOGB5(%pc),%fp3
+	fmov.d		LOGB4(%pc),%fp2
+
+	fmul.x		%fp1,%fp3		# W*B5
+	fmul.x		%fp1,%fp2		# W*B4
+
+	fadd.d		LOGB3(%pc),%fp3		# B3+W*B5
+	fadd.d		LOGB2(%pc),%fp2		# B2+W*B4
+
+	fmul.x		%fp3,%fp1		# W*(B3+W*B5), FP3 RELEASED
+
+	fmul.x		%fp0,%fp2		# V*(B2+W*B4)
+
+	fadd.d		LOGB1(%pc),%fp1		# B1+W*(B3+W*B5)
+	fmul.x		SAVEU(%a6),%fp0		# FP0 IS U*V
+
+	fadd.x		%fp2,%fp1		# B1+W*(B3+W*B5) + V*(B2+W*B4), FP2 RELEASED
+	fmovm.x		(%sp)+,&0x30		# FP2-3 RESTORED
+
+	fmul.x		%fp1,%fp0		# U*V*( [B1+W*(B3+W*B5)] + [V*(B2+W*B4)] )
+
+	fmov.l		%d0,%fpcr
+	fadd.x		SAVEU(%a6),%fp0
+	bra		t_inx2
+
+#--REGISTERS SAVED FPCR. LOG(-VE) IS INVALID
+LOGNEG:
+	bra		t_operr
+
+	global		slognd
+slognd:
+#--ENTRY POINT FOR LOG(X) FOR DENORMALIZED INPUT
+
+	mov.l		&-100,ADJK(%a6)		# INPUT = 2^(ADJK) * FP0
+
+#----normalize the input value by left shifting k bits (k to be determined
+#----below), adjusting exponent and storing -k to  ADJK
+#----the value TWOTO100 is no longer needed.
+#----Note that this code assumes the denormalized input is NON-ZERO.
+
+	movm.l		&0x3f00,-(%sp)		# save some registers  {d2-d7}
+	mov.l		(%a0),%d3		# D3 is exponent of smallest norm. #
+	mov.l		4(%a0),%d4
+	mov.l		8(%a0),%d5		# (D4,D5) is (Hi_X,Lo_X)
+	clr.l		%d2			# D2 used for holding K
+
+	tst.l		%d4
+	bne.b		Hi_not0
+
+Hi_0:
+	mov.l		%d5,%d4
+	clr.l		%d5
+	mov.l		&32,%d2
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6
+	lsl.l		%d6,%d4
+	add.l		%d6,%d2			# (D3,D4,D5) is normalized
+
+	mov.l		%d3,X(%a6)
+	mov.l		%d4,XFRAC(%a6)
+	mov.l		%d5,XFRAC+4(%a6)
+	neg.l		%d2
+	mov.l		%d2,ADJK(%a6)
+	fmov.x		X(%a6),%fp0
+	movm.l		(%sp)+,&0xfc		# restore registers {d2-d7}
+	lea		X(%a6),%a0
+	bra.w		LOGBGN			# begin regular log(X)
+
+Hi_not0:
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6		# find first 1
+	mov.l		%d6,%d2			# get k
+	lsl.l		%d6,%d4
+	mov.l		%d5,%d7			# a copy of D5
+	lsl.l		%d6,%d5
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d4			# (D3,D4,D5) normalized
+
+	mov.l		%d3,X(%a6)
+	mov.l		%d4,XFRAC(%a6)
+	mov.l		%d5,XFRAC+4(%a6)
+	neg.l		%d2
+	mov.l		%d2,ADJK(%a6)
+	fmov.x		X(%a6),%fp0
+	movm.l		(%sp)+,&0xfc		# restore registers {d2-d7}
+	lea		X(%a6),%a0
+	bra.w		LOGBGN			# begin regular log(X)
+
+	global		slognp1
+#--ENTRY POINT FOR LOG(1+X) FOR X FINITE, NON-ZERO, NOT NAN'S
+slognp1:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	fabs.x		%fp0			# test magnitude
+	fcmp.x		%fp0,LTHOLD(%pc)	# compare with min threshold
+	fbgt.w		LP1REAL			# if greater, continue
+	fmov.l		%d0,%fpcr
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		(%a0),%fp0		# return signed argument
+	bra		t_catch
+
+LP1REAL:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	mov.l		&0x00000000,ADJK(%a6)
+	fmov.x		%fp0,%fp1		# FP1 IS INPUT Z
+	fadd.s		one(%pc),%fp0		# X := ROUND(1+Z)
+	fmov.x		%fp0,X(%a6)
+	mov.w		XFRAC(%a6),XDCARE(%a6)
+	mov.l		X(%a6),%d1
+	cmp.l		%d1,&0
+	ble.w		LP1NEG0			# LOG OF ZERO OR -VE
+	cmp.l		%d1,&0x3ffe8000		# IS 1+Z IN [1/2,3/2]?
+	blt.w		LOGMAIN
+	cmp.l		%d1,&0x3fffc000
+	bgt.w		LOGMAIN
+#--IF 1+Z > 3/2 OR 1+Z < 1/2, THEN X, WHICH IS THE ROUNDED VALUE
+#--OF 1+Z, CONTAINS AT LEAST 63 BITS OF INFORMATION ABOUT Z. IN THAT
+#--CASE, SIMPLY INVOKE LOG(X) FOR LOG(1+Z).
+
+LP1NEAR1:
+#--NEXT SEE IF EXP(-1/16) < X < EXP(1/16)
+	cmp.l		%d1,&0x3ffef07d
+	blt.w		LP1CARE
+	cmp.l		%d1,&0x3fff8841
+	bgt.w		LP1CARE
+
+LP1ONE16:
+#--EXP(-1/16) < X < EXP(1/16). LOG(1+Z) = LOG(1+U/2) - LOG(1-U/2)
+#--WHERE U = 2Z/(2+Z) = 2Z/(1+X).
+	fadd.x		%fp1,%fp1		# FP1 IS 2Z
+	fadd.s		one(%pc),%fp0		# FP0 IS 1+X
+#--U = FP1/FP0
+	bra.w		LP1CONT2
+
+LP1CARE:
+#--HERE WE USE THE USUAL TABLE DRIVEN APPROACH. CARE HAS TO BE
+#--TAKEN BECAUSE 1+Z CAN HAVE 67 BITS OF INFORMATION AND WE MUST
+#--PRESERVE ALL THE INFORMATION. BECAUSE 1+Z IS IN [1/2,3/2],
+#--THERE ARE ONLY TWO CASES.
+#--CASE 1: 1+Z < 1, THEN K = -1 AND Y-F = (2-F) + 2Z
+#--CASE 2: 1+Z > 1, THEN K = 0  AND Y-F = (1-F) + Z
+#--ON RETURNING TO LP1CONT1, WE MUST HAVE K IN FP1, ADDRESS OF
+#--(1/F) IN A0, Y-F IN FP0, AND FP2 SAVED.
+
+	mov.l		XFRAC(%a6),FFRAC(%a6)
+	and.l		&0xFE000000,FFRAC(%a6)
+	or.l		&0x01000000,FFRAC(%a6)	# F OBTAINED
+	cmp.l		%d1,&0x3FFF8000		# SEE IF 1+Z > 1
+	bge.b		KISZERO
+
+KISNEG1:
+	fmov.s		TWO(%pc),%fp0
+	mov.l		&0x3fff0000,F(%a6)
+	clr.l		F+8(%a6)
+	fsub.x		F(%a6),%fp0		# 2-F
+	mov.l		FFRAC(%a6),%d1
+	and.l		&0x7E000000,%d1
+	asr.l		&8,%d1
+	asr.l		&8,%d1
+	asr.l		&4,%d1			# D0 CONTAINS DISPLACEMENT FOR 1/F
+	fadd.x		%fp1,%fp1		# GET 2Z
+	fmovm.x		&0xc,-(%sp)		# SAVE FP2  {%fp2/%fp3}
+	fadd.x		%fp1,%fp0		# FP0 IS Y-F = (2-F)+2Z
+	lea		LOGTBL(%pc),%a0		# A0 IS ADDRESS OF 1/F
+	add.l		%d1,%a0
+	fmov.s		negone(%pc),%fp1	# FP1 IS K = -1
+	bra.w		LP1CONT1
+
+KISZERO:
+	fmov.s		one(%pc),%fp0
+	mov.l		&0x3fff0000,F(%a6)
+	clr.l		F+8(%a6)
+	fsub.x		F(%a6),%fp0		# 1-F
+	mov.l		FFRAC(%a6),%d1
+	and.l		&0x7E000000,%d1
+	asr.l		&8,%d1
+	asr.l		&8,%d1
+	asr.l		&4,%d1
+	fadd.x		%fp1,%fp0		# FP0 IS Y-F
+	fmovm.x		&0xc,-(%sp)		# FP2 SAVED {%fp2/%fp3}
+	lea		LOGTBL(%pc),%a0
+	add.l		%d1,%a0			# A0 IS ADDRESS OF 1/F
+	fmov.s		zero(%pc),%fp1		# FP1 IS K = 0
+	bra.w		LP1CONT1
+
+LP1NEG0:
+#--FPCR SAVED. D0 IS X IN COMPACT FORM.
+	cmp.l		%d1,&0
+	blt.b		LP1NEG
+LP1ZERO:
+	fmov.s		negone(%pc),%fp0
+
+	fmov.l		%d0,%fpcr
+	bra		t_dz
+
+LP1NEG:
+	fmov.s		zero(%pc),%fp0
+
+	fmov.l		%d0,%fpcr
+	bra		t_operr
+
+	global		slognp1d
+#--ENTRY POINT FOR LOG(1+Z) FOR DENORMALIZED INPUT
+# Simply return the denorm
+slognp1d:
+	bra		t_extdnrm
+
+#########################################################################
+# satanh():  computes the inverse hyperbolic tangent of a norm input	#
+# satanhd(): computes the inverse hyperbolic tangent of a denorm input	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = arctanh(X)						#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	ATANH								#
+#	1. If |X| >= 1, go to 3.					#
+#									#
+#	2. (|X| < 1) Calculate atanh(X) by				#
+#		sgn := sign(X)						#
+#		y := |X|						#
+#		z := 2y/(1-y)						#
+#		atanh(X) := sgn * (1/2) * logp1(z)			#
+#		Exit.							#
+#									#
+#	3. If |X| > 1, go to 5.						#
+#									#
+#	4. (|X| = 1) Generate infinity with an appropriate sign and	#
+#		divide-by-zero by					#
+#		sgn := sign(X)						#
+#		atanh(X) := sgn / (+0).					#
+#		Exit.							#
+#									#
+#	5. (|X| > 1) Generate an invalid operation by 0 * infinity.	#
+#		Exit.							#
+#									#
+#########################################################################
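+
+# a minimal C sketch of step 2 above (illustration only; log1p() plays
+# the role of the slognp1 call):
+#
+#	#include <math.h>
+#	double atanh_sketch(double x)	/* assumes |x| < 1 */
+#	{
+#		double y = fabs(x);			/* y := |X|           */
+#		double z = 2.0*y/(1.0 - y);		/* z := 2y/(1-y)      */
+#		return copysign(0.5*log1p(z), x);	/* sgn*(1/2)*logp1(z) */
+#	}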
+
+	global		satanh
+satanh:
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x3FFF8000
+	bge.b		ATANHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--Y = |X|, Z = 2Y/(1-Y), ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z).
+
+	fabs.x		(%a0),%fp0		# Y = |X|
+	fmov.x		%fp0,%fp1
+	fneg.x		%fp1			# -Y
+	fadd.x		%fp0,%fp0		# 2Y
+	fadd.s		&0x3F800000,%fp1	# 1-Y
+	fdiv.x		%fp1,%fp0		# 2Y/(1-Y)
+	mov.l		(%a0),%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x3F000000,%d1		# SIGN(X)*HALF
+	mov.l		%d1,-(%sp)
+
+	mov.l		%d0,-(%sp)		# save rnd prec,mode
+	clr.l		%d0			# pass ext prec,RN
+	fmovm.x		&0x01,-(%sp)		# save Z on stack
+	lea		(%sp),%a0		# pass ptr to Z
+	bsr		slognp1			# LOG1P(Z)
+	add.l		&0xc,%sp		# clear Z from stack
+
+	mov.l		(%sp)+,%d0		# fetch old prec,mode
+	fmov.l		%d0,%fpcr		# load it
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.s		(%sp)+,%fp0
+	bra		t_catch
+
+ATANHBIG:
+	fabs.x		(%a0),%fp0		# |X|
+	fcmp.s		%fp0,&0x3F800000
+	fbgt		t_operr
+	bra		t_dz
+
+	global		satanhd
+#--ATANH(X) = X FOR DENORMALIZED X
+satanhd:
+	bra		t_extdnrm
+
+#########################################################################
+# slog10():  computes the base-10 logarithm of a normalized input	#
+# slog10d(): computes the base-10 logarithm of a denormalized input	#
+# slog2():   computes the base-2 logarithm of a normalized input	#
+# slog2d():  computes the base-2 logarithm of a denormalized input	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = log_10(X) or log_2(X)					#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 1.7 ulps in 64 significant bits,	#
+#	i.e. within 0.5003 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#       slog10d:							#
+#									#
+#       Step 0.	If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FPCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. Call slognd to obtain Y = log(X), the natural log of X.	#
+#       Notes:  Even if X is denormalized, log(X) is always normalized.	#
+#									#
+#       Step 2.  Compute log_10(X) = log(X) * (1/log(10)).		#
+#            2.1 Restore the user FPCR					#
+#            2.2 Return ans := Y * INV_L10.				#
+#									#
+#       slog10:								#
+#									#
+#       Step 0. If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FPCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. Call slogn to obtain Y = log(X), the natural log of X.	#
+#									#
+#       Step 2.   Compute log_10(X) = log(X) * (1/log(10)).		#
+#            2.1  Restore the user FPCR					#
+#            2.2  Return ans := Y * INV_L10.				#
+#									#
+#       slog2d:								#
+#									#
+#       Step 0. If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FPCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. Call slognd to obtain Y = log(X), the natural log of X.	#
+#       Notes:  Even if X is denormalized, log(X) is always normalized.	#
+#									#
+#       Step 2.   Compute log_2(X) = log(X) * (1/log(2)).		#
+#            2.1  Restore the user FPCR					#
+#            2.2  Return ans := Y * INV_L2.				#
+#									#
+#       slog2:								#
+#									#
+#       Step 0. If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FPCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. If X is not an integer power of two, i.e., X != 2^k,	#
+#               go to Step 3.						#
+#									#
+#       Step 2.   Return k.						#
+#            2.1  Get integer k, X = 2^k.				#
+#            2.2  Restore the user FPCR.				#
+#            2.3  Return ans := convert-to-double-extended(k).		#
+#									#
+#       Step 3. Call slogn to obtain Y = log(X), the natural log of X.	#
+#									#
+#       Step 4.   Compute log_2(X) = log(X) * (1/log(2)).		#
+#            4.1  Restore the user FPCR					#
+#            4.2  Return ans := Y * INV_L2.				#
+#									#
+#########################################################################
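+
+# the same flow as a C sketch (illustration only; 1.0/M_LN2 plays the
+# role of the INV_L2 constant below, and slog10 would multiply by
+# 1/log(10), i.e. INV_L10, instead):
+#
+#	#include <math.h>
+#	double log2_sketch(double x)	/* x > 0, finite */
+#	{
+#		int e;
+#		if (frexp(x, &e) == 0.5)	/* X == 2^(e-1) exactly? */
+#			return (double)(e - 1);	/* step 2: just return k */
+#		return log(x)*(1.0/M_LN2);	/* steps 3-4: Y * INV_L2 */
+#	}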
+
+INV_L10:
+	long		0x3FFD0000,0xDE5BD8A9,0x37287195,0x00000000
+
+INV_L2:
+	long		0x3FFF0000,0xB8AA3B29,0x5C17F0BC,0x00000000
+
+	global		slog10
+#--entry point for Log10(X), X is normalized
+slog10:
+	fmov.b		&0x1,%fp0
+	fcmp.x		%fp0,(%a0)		# if operand == 1,
+	fbeq.l		ld_pzero		# return an EXACT zero
+
+	mov.l		(%a0),%d1
+	blt.w		invalid
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slogn			# log(X), X normal.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L10(%pc),%fp0
+	bra		t_inx2
+
+	global		slog10d
+#--entry point for Log10(X), X is denormalized
+slog10d:
+	mov.l		(%a0),%d1
+	blt.w		invalid
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slognd			# log(X), X denorm.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L10(%pc),%fp0
+	bra		t_minx2
+
+	global		slog2
+#--entry point for Log2(X), X is normalized
+slog2:
+	mov.l		(%a0),%d1
+	blt.w		invalid
+
+	mov.l		8(%a0),%d1
+	bne.b		continue		# X is not 2^k
+
+	mov.l		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	bne.b		continue
+
+#--X = 2^k.
+	mov.w		(%a0),%d1
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x3FFF,%d1
+	beq.l		ld_pzero
+	fmov.l		%d0,%fpcr
+	fmov.l		%d1,%fp0
+	bra		t_inx2
+
+continue:
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slogn			# log(X), X normal.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L2(%pc),%fp0
+	bra		t_inx2
+
+invalid:
+	bra		t_operr
+
+	global		slog2d
+#--entry point for Log2(X), X is denormalized
+slog2d:
+	mov.l		(%a0),%d1
+	blt.w		invalid
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slognd			# log(X), X denorm.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L2(%pc),%fp0
+	bra		t_minx2
+
+#########################################################################
+# stwotox():  computes 2**X for a normalized input			#
+# stwotoxd(): computes 2**X for a denormalized input			#
+# stentox():  computes 10**X for a normalized input			#
+# stentoxd(): computes 10**X for a denormalized input			#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = 2**X or 10**X						#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 2 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	twotox								#
+#	1. If |X| > 16480, go to ExpBig.				#
+#									#
+#	2. If |X| < 2**(-70), go to ExpSm.				#
+#									#
+#	3. Decompose X as X = N/64 + r where |r| <= 1/128. Furthermore	#
+#		decompose N as						#
+#		 N = 64(M + M') + j,  j = 0,1,2,...,63.			#
+#									#
+#	4. Overwrite r := r * log2. Then				#
+#		2**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).		#
+#		Go to expr to compute that expression.			#
+#									#
+#	tentox								#
+#	1. If |X| > 16480*log_10(2) (base 10 log of 2), go to ExpBig.	#
+#									#
+#	2. If |X| < 2**(-70), go to ExpSm.				#
+#									#
+#	3. Set y := X*log_2(10)*64 (base 2 log of 10). Set		#
+#		N := round-to-int(y). Decompose N as			#
+#		 N = 64(M + M') + j,  j = 0,1,2,...,63.			#
+#									#
+#	4. Define r as							#
+#		r := ((X - N*L1)-N*L2) * L10				#
+#		where L1, L2 are the leading and trailing parts of	#
+#		log_10(2)/64 and L10 is the natural log of 10. Then	#
+#		10**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).		#
+#		Go to expr to compute that expression.			#
+#									#
+#	expr								#
+#	1. Fetch 2**(j/64) from table as Fact1 and Fact2.		#
+#									#
+#	2. Overwrite Fact1 and Fact2 by					#
+#		Fact1 := 2**(M) * Fact1					#
+#		Fact2 := 2**(M) * Fact2					#
+#		Thus Fact1 + Fact2 = 2**(M) * 2**(j/64).		#
+#									#
+#	3. Calculate P where 1 + P approximates exp(r):			#
+#		P = r + r*r*(A1+r*(A2+...+r*A5)).			#
+#									#
+#	4. Let AdjFact := 2**(M'). Return				#
+#		AdjFact * ( Fact1 + ((Fact1*P) + Fact2) ).		#
+#		Exit.							#
+#									#
+#	ExpBig								#
+#	1. Generate overflow by Huge * Huge if X > 0; otherwise,	#
+#	        generate underflow by Tiny * Tiny.			#
+#									#
+#	ExpSm								#
+#	1. Return 1 + X.						#
+#									#
+#########################################################################
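+
+# steps 1-4 of twotox as a C sketch (illustration only; exp2() and the
+# final ldexp() replace the 2^(j/64) table and exponent insertion,
+# M_LN2 is the usual <math.h> log(2), and an arithmetic >> on negative
+# n is assumed, matching the asr used below):
+#
+#	#include <math.h>
+#	double twotox_sketch(double x)	/* 2^(-70) <= |x| <= 16480 */
+#	{
+#		long n = lround(64.0*x);	/* N = ROUND-TO-INT(64X) */
+#		long j = n & 63, m = n >> 6;	/* N = 64M + J           */
+#		double r = (x - n/64.0)*M_LN2;	/* r := r*log2           */
+#		return ldexp(exp2(j/64.0)*exp(r), (int)m);
+#	}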
+
+L2TEN64:
+	long		0x406A934F,0x0979A371	# 64LOG10/LOG2
+L10TWO1:
+	long		0x3F734413,0x509F8000	# LOG2/64LOG10
+
+L10TWO2:
+	long		0xBFCD0000,0xC0219DC1,0xDA994FD2,0x00000000
+
+LOG10:	long		0x40000000,0x935D8DDD,0xAAA8AC17,0x00000000
+
+LOG2:	long		0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+EXPA5:	long		0x3F56C16D,0x6F7BD0B2
+EXPA4:	long		0x3F811112,0x302C712C
+EXPA3:	long		0x3FA55555,0x55554CC1
+EXPA2:	long		0x3FC55555,0x55554A54
+EXPA1:	long		0x3FE00000,0x00000000,0x00000000,0x00000000
+
+TEXPTBL:
+	long		0x3FFF0000,0x80000000,0x00000000,0x3F738000
+	long		0x3FFF0000,0x8164D1F3,0xBC030773,0x3FBEF7CA
+	long		0x3FFF0000,0x82CD8698,0xAC2BA1D7,0x3FBDF8A9
+	long		0x3FFF0000,0x843A28C3,0xACDE4046,0x3FBCD7C9
+	long		0x3FFF0000,0x85AAC367,0xCC487B15,0xBFBDE8DA
+	long		0x3FFF0000,0x871F6196,0x9E8D1010,0x3FBDE85C
+	long		0x3FFF0000,0x88980E80,0x92DA8527,0x3FBEBBF1
+	long		0x3FFF0000,0x8A14D575,0x496EFD9A,0x3FBB80CA
+	long		0x3FFF0000,0x8B95C1E3,0xEA8BD6E7,0xBFBA8373
+	long		0x3FFF0000,0x8D1ADF5B,0x7E5BA9E6,0xBFBE9670
+	long		0x3FFF0000,0x8EA4398B,0x45CD53C0,0x3FBDB700
+	long		0x3FFF0000,0x9031DC43,0x1466B1DC,0x3FBEEEB0
+	long		0x3FFF0000,0x91C3D373,0xAB11C336,0x3FBBFD6D
+	long		0x3FFF0000,0x935A2B2F,0x13E6E92C,0xBFBDB319
+	long		0x3FFF0000,0x94F4EFA8,0xFEF70961,0x3FBDBA2B
+	long		0x3FFF0000,0x96942D37,0x20185A00,0x3FBE91D5
+	long		0x3FFF0000,0x9837F051,0x8DB8A96F,0x3FBE8D5A
+	long		0x3FFF0000,0x99E04593,0x20B7FA65,0xBFBCDE7B
+	long		0x3FFF0000,0x9B8D39B9,0xD54E5539,0xBFBEBAAF
+	long		0x3FFF0000,0x9D3ED9A7,0x2CFFB751,0xBFBD86DA
+	long		0x3FFF0000,0x9EF53260,0x91A111AE,0xBFBEBEDD
+	long		0x3FFF0000,0xA0B0510F,0xB9714FC2,0x3FBCC96E
+	long		0x3FFF0000,0xA2704303,0x0C496819,0xBFBEC90B
+	long		0x3FFF0000,0xA43515AE,0x09E6809E,0x3FBBD1DB
+	long		0x3FFF0000,0xA5FED6A9,0xB15138EA,0x3FBCE5EB
+	long		0x3FFF0000,0xA7CD93B4,0xE965356A,0xBFBEC274
+	long		0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x3FBEA83C
+	long		0x3FFF0000,0xAB7A39B5,0xA93ED337,0x3FBECB00
+	long		0x3FFF0000,0xAD583EEA,0x42A14AC6,0x3FBE9301
+	long		0x3FFF0000,0xAF3B78AD,0x690A4375,0xBFBD8367
+	long		0x3FFF0000,0xB123F581,0xD2AC2590,0xBFBEF05F
+	long		0x3FFF0000,0xB311C412,0xA9112489,0x3FBDFB3C
+	long		0x3FFF0000,0xB504F333,0xF9DE6484,0x3FBEB2FB
+	long		0x3FFF0000,0xB6FD91E3,0x28D17791,0x3FBAE2CB
+	long		0x3FFF0000,0xB8FBAF47,0x62FB9EE9,0x3FBCDC3C
+	long		0x3FFF0000,0xBAFF5AB2,0x133E45FB,0x3FBEE9AA
+	long		0x3FFF0000,0xBD08A39F,0x580C36BF,0xBFBEAEFD
+	long		0x3FFF0000,0xBF1799B6,0x7A731083,0xBFBCBF51
+	long		0x3FFF0000,0xC12C4CCA,0x66709456,0x3FBEF88A
+	long		0x3FFF0000,0xC346CCDA,0x24976407,0x3FBD83B2
+	long		0x3FFF0000,0xC5672A11,0x5506DADD,0x3FBDF8AB
+	long		0x3FFF0000,0xC78D74C8,0xABB9B15D,0xBFBDFB17
+	long		0x3FFF0000,0xC9B9BD86,0x6E2F27A3,0xBFBEFE3C
+	long		0x3FFF0000,0xCBEC14FE,0xF2727C5D,0xBFBBB6F8
+	long		0x3FFF0000,0xCE248C15,0x1F8480E4,0xBFBCEE53
+	long		0x3FFF0000,0xD06333DA,0xEF2B2595,0xBFBDA4AE
+	long		0x3FFF0000,0xD2A81D91,0xF12AE45A,0x3FBC9124
+	long		0x3FFF0000,0xD4F35AAB,0xCFEDFA1F,0x3FBEB243
+	long		0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x3FBDE69A
+	long		0x3FFF0000,0xD99D15C2,0x78AFD7B6,0xBFB8BC61
+	long		0x3FFF0000,0xDBFBB797,0xDAF23755,0x3FBDF610
+	long		0x3FFF0000,0xDE60F482,0x5E0E9124,0xBFBD8BE1
+	long		0x3FFF0000,0xE0CCDEEC,0x2A94E111,0x3FBACB12
+	long		0x3FFF0000,0xE33F8972,0xBE8A5A51,0x3FBB9BFE
+	long		0x3FFF0000,0xE5B906E7,0x7C8348A8,0x3FBCF2F4
+	long		0x3FFF0000,0xE8396A50,0x3C4BDC68,0x3FBEF22F
+	long		0x3FFF0000,0xEAC0C6E7,0xDD24392F,0xBFBDBF4A
+	long		0x3FFF0000,0xED4F301E,0xD9942B84,0x3FBEC01A
+	long		0x3FFF0000,0xEFE4B99B,0xDCDAF5CB,0x3FBE8CAC
+	long		0x3FFF0000,0xF281773C,0x59FFB13A,0xBFBCBB3F
+	long		0x3FFF0000,0xF5257D15,0x2486CC2C,0x3FBEF73A
+	long		0x3FFF0000,0xF7D0DF73,0x0AD13BB9,0xBFB8B795
+	long		0x3FFF0000,0xFA83B2DB,0x722A033A,0x3FBEF84B
+	long		0x3FFF0000,0xFD3E0C0C,0xF486C175,0xBFBEF581
+
+	set		INT,L_SCR1
+
+	set		X,FP_SCR0
+	set		XDCARE,X+2
+	set		XFRAC,X+4
+
+	set		ADJFACT,FP_SCR0
+
+	set		FACT1,FP_SCR0
+	set		FACT1HI,FACT1+4
+	set		FACT1LOW,FACT1+8
+
+	set		FACT2,FP_SCR1
+	set		FACT2HI,FACT2+4
+	set		FACT2LOW,FACT2+8
+
+	global		stwotox
+#--ENTRY POINT FOR 2**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+stwotox:
+	fmovm.x		(%a0),&0x80		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	fmov.x		%fp0,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FB98000		# |X| >= 2**(-70)?
+	bge.b		TWOOK1
+	bra.w		EXPBORS
+
+TWOOK1:
+	cmp.l		%d1,&0x400D80C0		# |X| > 16480?
+	ble.b		TWOMAIN
+	bra.w		EXPBORS
+
+TWOMAIN:
+#--USUAL CASE, 2^(-70) <= |X| <= 16480
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42800000,%fp1	# 64 * X
+	fmov.l		%fp1,INT(%a6)		# N = ROUND-TO-INT(64 X)
+	mov.l		%d2,-(%sp)
+	lea		TEXPTBL(%pc),%a1	# LOAD ADDRESS OF TABLE OF 2^(J/64)
+	fmov.l		INT(%a6),%fp1		# N --> FLOATING FMT
+	mov.l		INT(%a6),%d1
+	mov.l		%d1,%d2
+	and.l		&0x3F,%d1		# D0 IS J
+	asl.l		&4,%d1			# DISPLACEMENT FOR 2^(J/64)
+	add.l		%d1,%a1			# ADDRESS FOR 2^(J/64)
+	asr.l		&6,%d2			# d2 IS L, N = 64L + J
+	mov.l		%d2,%d1
+	asr.l		&1,%d1			# D0 IS M
+	sub.l		%d1,%d2			# d2 IS M', N = 64(M+M') + J
+	add.l		&0x3FFF,%d2
+
+#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+#--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+#--ADJFACT = 2^(M').
+#--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
+
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmul.s		&0x3C800000,%fp1	# (1/64)*N
+	mov.l		(%a1)+,FACT1(%a6)
+	mov.l		(%a1)+,FACT1HI(%a6)
+	mov.l		(%a1)+,FACT1LOW(%a6)
+	mov.w		(%a1)+,FACT2(%a6)
+
+	fsub.x		%fp1,%fp0		# X - (1/64)*INT(64 X)
+
+	mov.w		(%a1)+,FACT2HI(%a6)
+	clr.w		FACT2HI+2(%a6)
+	clr.l		FACT2LOW(%a6)
+	add.w		%d1,FACT1(%a6)
+	fmul.x		LOG2(%pc),%fp0		# FP0 IS R
+	add.w		%d1,FACT2(%a6)
+
+	bra.w		expr
+
+EXPBORS:
+#--FPCR, D0 SAVED
+	cmp.l		%d1,&0x3FFF8000
+	bgt.b		TEXPBIG
+
+#--|X| IS SMALL, RETURN 1 + X
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	fadd.s		&0x3F800000,%fp0	# RETURN 1 + X
+	bra		t_pinx2
+
+TEXPBIG:
+#--|X| IS LARGE, GENERATE OVERFLOW IF X > 0; ELSE GENERATE UNDERFLOW
+#--REGISTERS SAVED SO FAR ARE FPCR AND D0
+	mov.l		X(%a6),%d1
+	cmp.l		%d1,&0
+	blt.b		EXPNEG
+
+	bra		t_ovfl2			# t_ovfl expects positive value
+
+EXPNEG:
+	bra		t_unfl2			# t_unfl expects positive value
+
+	global		stwotoxd
+stwotoxd:
+#--ENTRY POINT FOR 2**(X) FOR DENORMALIZED ARGUMENT
+
+	fmov.l		%d0,%fpcr		# set user's rounding mode/precision
+	fmov.s		&0x3F800000,%fp0	# RETURN 1 + X
+	mov.l		(%a0),%d1
+	or.l		&0x00800001,%d1
+	fadd.s		%d1,%fp0
+	bra		t_pinx2
+
+	global		stentox
+#--ENTRY POINT FOR 10**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+stentox:
+	fmovm.x		(%a0),&0x80		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	fmov.x		%fp0,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FB98000		# |X| >= 2**(-70)?
+	bge.b		TENOK1
+	bra.w		EXPBORS
+
+TENOK1:
+	cmp.l		%d1,&0x400B9B07		# |X| <= 16480*log2/log10 ?
+	ble.b		TENMAIN
+	bra.w		EXPBORS
+
+TENMAIN:
+#--USUAL CASE, 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10
+
+	fmov.x		%fp0,%fp1
+	fmul.d		L2TEN64(%pc),%fp1	# X*64*LOG10/LOG2
+	fmov.l		%fp1,INT(%a6)		# N=INT(X*64*LOG10/LOG2)
+	mov.l		%d2,-(%sp)
+	lea		TEXPTBL(%pc),%a1	# LOAD ADDRESS OF TABLE OF 2^(J/64)
+	fmov.l		INT(%a6),%fp1		# N --> FLOATING FMT
+	mov.l		INT(%a6),%d1
+	mov.l		%d1,%d2
+	and.l		&0x3F,%d1		# D0 IS J
+	asl.l		&4,%d1			# DISPLACEMENT FOR 2^(J/64)
+	add.l		%d1,%a1			# ADDRESS FOR 2^(J/64)
+	asr.l		&6,%d2			# d2 IS L, N = 64L + J
+	mov.l		%d2,%d1
+	asr.l		&1,%d1			# D0 IS M
+	sub.l		%d1,%d2			# d2 IS M', N = 64(M+M') + J
+	add.l		&0x3FFF,%d2
+
+#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+#--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+#--ADJFACT = 2^(M').
+#--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmov.x		%fp1,%fp2
+
+	fmul.d		L10TWO1(%pc),%fp1	# N*(LOG2/64LOG10)_LEAD
+	mov.l		(%a1)+,FACT1(%a6)
+
+	fmul.x		L10TWO2(%pc),%fp2	# N*(LOG2/64LOG10)_TRAIL
+
+	mov.l		(%a1)+,FACT1HI(%a6)
+	mov.l		(%a1)+,FACT1LOW(%a6)
+	fsub.x		%fp1,%fp0		# X - N L_LEAD
+	mov.w		(%a1)+,FACT2(%a6)
+
+	fsub.x		%fp2,%fp0		# X - N L_TRAIL
+
+	mov.w		(%a1)+,FACT2HI(%a6)
+	clr.w		FACT2HI+2(%a6)
+	clr.l		FACT2LOW(%a6)
+
+	fmul.x		LOG10(%pc),%fp0		# FP0 IS R
+	add.w		%d1,FACT1(%a6)
+	add.w		%d1,FACT2(%a6)
+
+expr:
+#--FPCR, FP2, FP3 ARE SAVED IN ORDER AS SHOWN.
+#--ADJFACT CONTAINS 2**(M'), FACT1 + FACT2 = 2**(M) * 2**(J/64).
+#--FP0 IS R. THE FOLLOWING CODE COMPUTES
+#--	2**(M'+M) * 2**(J/64) * EXP(R)
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS S = R*R
+
+	fmov.d		EXPA5(%pc),%fp2		# FP2 IS A5
+	fmov.d		EXPA4(%pc),%fp3		# FP3 IS A4
+
+	fmul.x		%fp1,%fp2		# FP2 IS S*A5
+	fmul.x		%fp1,%fp3		# FP3 IS S*A4
+
+	fadd.d		EXPA3(%pc),%fp2		# FP2 IS A3+S*A5
+	fadd.d		EXPA2(%pc),%fp3		# FP3 IS A2+S*A4
+
+	fmul.x		%fp1,%fp2		# FP2 IS S*(A3+S*A5)
+	fmul.x		%fp1,%fp3		# FP3 IS S*(A2+S*A4)
+
+	fadd.d		EXPA1(%pc),%fp2		# FP2 IS A1+S*(A3+S*A5)
+	fmul.x		%fp0,%fp3		# FP3 IS R*S*(A2+S*A4)
+
+	fmul.x		%fp1,%fp2		# FP2 IS S*(A1+S*(A3+S*A5))
+	fadd.x		%fp3,%fp0		# FP0 IS R+R*S*(A2+S*A4)
+	fadd.x		%fp2,%fp0		# FP0 IS EXP(R) - 1
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+#--FINAL RECONSTRUCTION PROCESS
+#--EXP(X) = 2^M*2^(J/64) + 2^M*2^(J/64)*(EXP(R)-1)  -  (1 OR 0)
+
+	fmul.x		FACT1(%a6),%fp0
+	fadd.x		FACT2(%a6),%fp0
+	fadd.x		FACT1(%a6),%fp0
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	mov.w		%d2,ADJFACT(%a6)	# INSERT EXPONENT
+	mov.l		(%sp)+,%d2
+	mov.l		&0x80000000,ADJFACT+4(%a6)
+	clr.l		ADJFACT+8(%a6)
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		ADJFACT(%a6),%fp0	# FINAL ADJUSTMENT
+	bra		t_catch
+
+	global		stentoxd
+stentoxd:
+#--ENTRY POINT FOR 10**(X) FOR DENORMALIZED ARGUMENT
+
+	fmov.l		%d0,%fpcr		# set user's rounding mode/precision
+	fmov.s		&0x3F800000,%fp0	# RETURN 1 + X
+	mov.l		(%a0),%d1
+	or.l		&0x00800001,%d1
+	fadd.s		%d1,%fp0
+	bra		t_pinx2
+
+#########################################################################
+# sscale(): computes the destination operand scaled by the source	#
+#	    operand. If the absolute value of the source operand is	#
+#	    >= 2^14, an overflow or underflow is returned.		#
+#									#
+# INPUT *************************************************************** #
+#	a0  = pointer to double-extended source operand X		#
+#	a1  = pointer to double-extended destination operand Y		#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 =  scale(X,Y)						#
+#									#
+#########################################################################
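+
+# functionally, sscale computes the following (C sketch, illustration
+# only; the real code's extra work is in raising the right exceptions
+# for denormalized and out-of-range cases):
+#
+#	#include <math.h>
+#	double scale_sketch(double y, double x)	/* |trunc(x)| < 2^14 */
+#	{
+#		return ldexp(y, (int)trunc(x));	/* y * 2^(int part of x) */
+#	}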
+
+set	SIGN,		L_SCR1
+
+	global		sscale
+sscale:
+	mov.l		%d0,-(%sp)		# store off ctrl bits for now
+
+	mov.w		DST_EX(%a1),%d1		# get dst exponent
+	smi.b		SIGN(%a6)		# use SIGN to hold dst sign
+	andi.l		&0x00007fff,%d1		# strip sign from dst exp
+
+	mov.w		SRC_EX(%a0),%d0		# check src bounds
+	andi.w		&0x7fff,%d0		# clr src sign bit
+	cmpi.w		%d0,&0x3fff		# is |src| < 1?
+	blt.w		src_small		# yes
+	cmpi.w		%d0,&0x400c		# no; is src too big?
+	bgt.w		src_out			# yes
+
+#
+# Source is within 2^14 range.
+#
+src_ok:
+	fintrz.x	SRC(%a0),%fp0		# calc int of src
+	fmov.l		%fp0,%d0		# int src to d0
+# don't want any accrued bits from the fintrz showing up later since
+# we may need to read the fpsr for the last fp op in t_catch2().
+	fmov.l		&0x0,%fpsr
+
+	tst.b		DST_HI(%a1)		# is dst denormalized?
+	bmi.b		sok_norm
+
+# the dst is a DENORM. normalize the DENORM and add the adjustment to
+# the src value. then, jump to the norm part of the routine.
+sok_dnrm:
+	mov.l		%d0,-(%sp)		# save src for now
+
+	mov.w		DST_EX(%a1),FP_SCR0_EX(%a6) # make a copy
+	mov.l		DST_HI(%a1),FP_SCR0_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR0_LO(%a6)
+
+	lea		FP_SCR0(%a6),%a0	# pass ptr to DENORM
+	bsr.l		norm			# normalize the DENORM
+	neg.l		%d0
+	add.l		(%sp)+,%d0		# add adjustment to src
+
+	fmovm.x		FP_SCR0(%a6),&0x80	# load normalized DENORM
+
+	cmpi.w		%d0,&-0x3fff		# is the shift amt really low?
+	bge.b		sok_norm2		# thank goodness no
+
+# the multiply factor that we're trying to create should be a denorm
+# for the multiply to work. therefore, we're going to actually do a
+# multiply with a denorm which will cause an unimplemented data type
+# exception to be put into the machine which will be caught and corrected
+# later. we don't do this with the DENORMs above because this method
+# is slower. but, don't fret, I don't see it being used much either.
+	fmov.l		(%sp)+,%fpcr		# restore user fpcr
+	mov.l		&0x80000000,%d1		# load normalized mantissa
+	subi.l		&-0x3fff,%d0		# how many should we shift?
+	neg.l		%d0			# make it positive
+	cmpi.b		%d0,&0x20		# is it > 32?
+	bge.b		sok_dnrm_32		# yes
+	lsr.l		%d0,%d1			# no; bit stays in upper lw
+	clr.l		-(%sp)			# insert zero low mantissa
+	mov.l		%d1,-(%sp)		# insert new high mantissa
+	clr.l		-(%sp)			# make zero exponent
+	bra.b		sok_norm_cont
+sok_dnrm_32:
+	subi.b		&0x20,%d0		# get shift count
+	lsr.l		%d0,%d1			# make low mantissa longword
+	mov.l		%d1,-(%sp)		# insert new low mantissa
+	clr.l		-(%sp)			# insert zero high mantissa
+	clr.l		-(%sp)			# make zero exponent
+	bra.b		sok_norm_cont
+
+# the src will force the dst to a DENORM value or worse. so, let's
+# create an fp multiply that will create the result.
+sok_norm:
+	fmovm.x		DST(%a1),&0x80		# load fp0 with normalized src
+sok_norm2:
+	fmov.l		(%sp)+,%fpcr		# restore user fpcr
+
+	addi.w		&0x3fff,%d0		# turn src amt into exp value
+	swap		%d0			# put exponent in high word
+	clr.l		-(%sp)			# insert new exponent
+	mov.l		&0x80000000,-(%sp)	# insert new high mantissa
+	mov.l		%d0,-(%sp)		# insert new lo mantissa
+
+sok_norm_cont:
+	fmov.l		%fpcr,%d0		# d0 needs fpcr for t_catch2
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		(%sp)+,%fp0		# do the multiply
+	bra		t_catch2		# catch any exceptions
+
+#
+# Source is outside of 2^14 range.  Test the sign and branch
+# to the appropriate exception handler.
+#
+src_out:
+	mov.l		(%sp)+,%d0		# restore ctrl bits
+	exg		%a0,%a1			# swap src,dst ptrs
+	tst.b		SRC_EX(%a1)		# is src negative?
+	bmi		t_unfl			# yes; underflow
+	bra		t_ovfl_sc		# no; overflow
+
+#
+# The source input is below 1, so we check for denormalized numbers
+# and set unfl.
+#
+src_small:
+	tst.b		DST_HI(%a1)		# is dst denormalized?
+	bpl.b		ssmall_done		# yes
+
+	mov.l		(%sp)+,%d0
+	fmov.l		%d0,%fpcr		# no; load control bits
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		DST(%a1),%fp0		# simply return dest
+	bra		t_catch2
+ssmall_done:
+	mov.l		(%sp)+,%d0		# load control bits into d1
+	mov.l		%a1,%a0			# pass ptr to dst
+	bra		t_resdnrm
+
+#########################################################################
+# smod(): computes the fp MOD of the input values X,Y.			#
+# srem(): computes the fp (IEEE) REM of the input values X,Y.		#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input X			#
+#	a1 = pointer to extended precision input Y			#
+#	d0 = round precision,mode					#
+#									#
+#	The input operands X and Y can be either normalized or		#
+#	denormalized.							#
+#									#
+# OUTPUT ************************************************************** #
+#      fp0 = FREM(X,Y) or FMOD(X,Y)					#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#       Step 1.  Save and strip signs of X and Y: signX := sign(X),	#
+#                signY := sign(Y), X := |X|, Y := |Y|,			#
+#                signQ := signX EOR signY. Record whether MOD or REM	#
+#                is requested.						#
+#									#
+#       Step 2.  Set L := expo(X)-expo(Y), k := 0, Q := 0.		#
+#                If (L < 0) then					#
+#                   R := X, go to Step 4.				#
+#                else							#
+#                   R := 2^(-L)X, j := L.				#
+#                endif							#
+#									#
+#       Step 3.  Perform MOD(X,Y)					#
+#            3.1 If R = Y, go to Step 9.				#
+#            3.2 If R > Y, then { R := R - Y, Q := Q + 1}		#
+#            3.3 If j = 0, go to Step 4.				#
+#            3.4 k := k + 1, j := j - 1, Q := 2Q, R := 2R. Go to	#
+#                Step 3.1.						#
+#									#
+#       Step 4.  At this point, R = X - QY = MOD(X,Y). Set		#
+#                Last_Subtract := false (used in Step 7 below). If	#
+#                MOD is requested, go to Step 6.			#
+#									#
+#       Step 5.  R = MOD(X,Y), but REM(X,Y) is requested.		#
+#            5.1 If R < Y/2, then R = MOD(X,Y) = REM(X,Y). Go to	#
+#                Step 6.						#
+#            5.2 If R > Y/2, then { set Last_Subtract := true,		#
+#                Q := Q + 1, Y := signY*Y }. Go to Step 6.		#
+#            5.3 This is the tricky case of R = Y/2. If Q is odd,	#
+#                then { Q := Q + 1, signX := -signX }.			#
+#									#
+#       Step 6.  R := signX*R.						#
+#									#
+#       Step 7.  If Last_Subtract = true, R := R - Y.			#
+#									#
+#       Step 8.  Return signQ, last 7 bits of Q, and R as required.	#
+#									#
+#       Step 9.  At this point, R = 2^(-j)*X - Q Y = Y. Thus,		#
+#                X = 2^(j)*(Q+1)Y. set Q := 2^(j)*(Q+1),		#
+#                R := 0. Return signQ, last 7 bits of Q, and R.		#
+#									#
+#########################################################################
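+
+# steps 4-7 in C, as a rough sketch (fmod() stands in for the bit loop
+# of steps 2-3; the Q-parity tie case of step 5.3 and the quotient-byte
+# bookkeeping of step 8 are omitted):
+#
+#	#include <math.h>
+#	double rem_sketch(double x, double y)	/* finite, y != 0 */
+#	{
+#		double ay = fabs(y);
+#		double r = fmod(fabs(x), ay);	/* step 4: R = MOD(|X|,|Y|) */
+#		if (r + r > ay)			/* step 5.2: R > Y/2        */
+#			r -= ay;		/* step 7: last subtract    */
+#		return copysign(1.0, x)*r;	/* step 6: R := signX*R     */
+#	}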
+
+	set		Mod_Flag,L_SCR3
+	set		Sc_Flag,L_SCR3+1
+
+	set		SignY,L_SCR2
+	set		SignX,L_SCR2+2
+	set		SignQ,L_SCR3+2
+
+	set		Y,FP_SCR0
+	set		Y_Hi,Y+4
+	set		Y_Lo,Y+8
+
+	set		R,FP_SCR1
+	set		R_Hi,R+4
+	set		R_Lo,R+8
+
+Scale:
+	long		0x00010000,0x80000000,0x00000000,0x00000000
+
+	global		smod
+smod:
+	clr.b		FPSR_QBYTE(%a6)
+	mov.l		%d0,-(%sp)		# save ctrl bits
+	clr.b		Mod_Flag(%a6)
+	bra.b		Mod_Rem
+
+	global		srem
+srem:
+	clr.b		FPSR_QBYTE(%a6)
+	mov.l		%d0,-(%sp)		# save ctrl bits
+	mov.b		&0x1,Mod_Flag(%a6)
+
+Mod_Rem:
+#..Save sign of X and Y
+	movm.l		&0x3f00,-(%sp)		# save data registers
+	mov.w		SRC_EX(%a0),%d3
+	mov.w		%d3,SignY(%a6)
+	and.l		&0x00007FFF,%d3		# Y := |Y|
+
+#
+	mov.l		SRC_HI(%a0),%d4
+	mov.l		SRC_LO(%a0),%d5		# (D3,D4,D5) is |Y|
+
+	tst.l		%d3
+	bne.b		Y_Normal
+
+	mov.l		&0x00003FFE,%d3		# $3FFD + 1
+	tst.l		%d4
+	bne.b		HiY_not0
+
+HiY_0:
+	mov.l		%d5,%d4
+	clr.l		%d5
+	sub.l		&32,%d3
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6
+	lsl.l		%d6,%d4
+	sub.l		%d6,%d3			# (D3,D4,D5) is normalized
+#	                                        ...with bias $7FFD
+	bra.b		Chk_X
+
+HiY_not0:
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6
+	sub.l		%d6,%d3
+	lsl.l		%d6,%d4
+	mov.l		%d5,%d7			# a copy of D5
+	lsl.l		%d6,%d5
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d4			# (D3,D4,D5) normalized
+#                                       ...with bias $7FFD
+	bra.b		Chk_X
+
+Y_Normal:
+	add.l		&0x00003FFE,%d3		# (D3,D4,D5) normalized
+#                                       ...with bias $7FFD
+
+Chk_X:
+	mov.w		DST_EX(%a1),%d0
+	mov.w		%d0,SignX(%a6)
+	mov.w		SignY(%a6),%d1
+	eor.l		%d0,%d1
+	and.l		&0x00008000,%d1
+	mov.w		%d1,SignQ(%a6)		# sign(Q) obtained
+	and.l		&0x00007FFF,%d0
+	mov.l		DST_HI(%a1),%d1
+	mov.l		DST_LO(%a1),%d2		# (D0,D1,D2) is |X|
+	tst.l		%d0
+	bne.b		X_Normal
+	mov.l		&0x00003FFE,%d0
+	tst.l		%d1
+	bne.b		HiX_not0
+
+HiX_0:
+	mov.l		%d2,%d1
+	clr.l		%d2
+	sub.l		&32,%d0
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	lsl.l		%d6,%d1
+	sub.l		%d6,%d0			# (D0,D1,D2) is normalized
+#                                       ...with bias $7FFD
+	bra.b		Init
+
+HiX_not0:
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	sub.l		%d6,%d0
+	lsl.l		%d6,%d1
+	mov.l		%d2,%d7			# a copy of D2
+	lsl.l		%d6,%d2
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d1			# (D0,D1,D2) normalized
+#                                       ...with bias $7FFD
+	bra.b		Init
+
+X_Normal:
+	add.l		&0x00003FFE,%d0		# (D0,D1,D2) normalized
+#                                       ...with bias $7FFD
+
+Init:
+#
+	mov.l		%d3,L_SCR1(%a6)		# save biased exp(Y)
+	mov.l		%d0,-(%sp)		# save biased exp(X)
+	sub.l		%d3,%d0			# L := expo(X)-expo(Y)
+
+	clr.l		%d6			# D6 := carry <- 0
+	clr.l		%d3			# D3 is Q
+	mov.l		&0,%a1			# A1 is k; j+k=L, Q=0
+
+#..(Carry,D1,D2) is R
+	tst.l		%d0
+	bge.b		Mod_Loop_pre
+
+#..expo(X) < expo(Y). Thus X = mod(X,Y)
+#
+	mov.l		(%sp)+,%d0		# restore d0
+	bra.w		Get_Mod
+
+Mod_Loop_pre:
+	addq.l		&0x4,%sp		# erase exp(X)
+#..At this point  R = 2^(-L)X; Q = 0; k = 0; and  k+j = L
+Mod_Loop:
+	tst.l		%d6			# test carry bit
+	bgt.b		R_GT_Y
+
+#..At this point carry = 0, R = (D1,D2), Y = (D4,D5)
+	cmp.l		%d1,%d4			# compare hi(R) and hi(Y)
+	bne.b		R_NE_Y
+	cmp.l		%d2,%d5			# compare lo(R) and lo(Y)
+	bne.b		R_NE_Y
+
+#..At this point, R = Y
+	bra.w		Rem_is_0
+
+R_NE_Y:
+#..use the borrow of the previous compare
+	bcs.b		R_LT_Y			# borrow is set iff R < Y
+
+R_GT_Y:
+#..If Carry is set, then Y < (Carry,D1,D2) < 2Y. Otherwise, Carry = 0
+#..and Y < (D1,D2) < 2Y. Either way, perform R - Y
+	sub.l		%d5,%d2			# lo(R) - lo(Y)
+	subx.l		%d4,%d1			# hi(R) - hi(Y)
+	clr.l		%d6			# clear carry
+	addq.l		&1,%d3			# Q := Q + 1
+
+R_LT_Y:
+#..At this point, Carry=0, R < Y. R = 2^(k-L)X - QY; k+j = L; j >= 0.
+	tst.l		%d0			# see if j = 0.
+	beq.b		PostLoop
+
+	add.l		%d3,%d3			# Q := 2Q
+	add.l		%d2,%d2			# lo(R) = 2lo(R)
+	roxl.l		&1,%d1			# hi(R) = 2hi(R) + carry
+	scs		%d6			# set Carry if 2(R) overflows
+	addq.l		&1,%a1			# k := k+1
+	subq.l		&1,%d0			# j := j - 1
+#..At this point, R=(Carry,D1,D2) = 2^(k-L)X - QY, j+k=L, j >= 0, R < 2Y.
+
+	bra.b		Mod_Loop
+
+PostLoop:
+#..k = L, j = 0, Carry = 0, R = (D1,D2) = X - QY, R < Y.
+
+#..normalize R.
+	mov.l		L_SCR1(%a6),%d0		# new biased expo of R
+	tst.l		%d1
+	bne.b		HiR_not0
+
+HiR_0:
+	mov.l		%d2,%d1
+	clr.l		%d2
+	sub.l		&32,%d0
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	lsl.l		%d6,%d1
+	sub.l		%d6,%d0			# (D0,D1,D2) is normalized
+#                                       ...with bias $7FFD
+	bra.b		Get_Mod
+
+HiR_not0:
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	bmi.b		Get_Mod			# already normalized
+	sub.l		%d6,%d0
+	lsl.l		%d6,%d1
+	mov.l		%d2,%d7			# a copy of D2
+	lsl.l		%d6,%d2
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d1			# (D0,D1,D2) normalized
+
+#
+Get_Mod:
+	cmp.l		%d0,&0x000041FE
+	bge.b		No_Scale
+Do_Scale:
+	mov.w		%d0,R(%a6)
+	mov.l		%d1,R_Hi(%a6)
+	mov.l		%d2,R_Lo(%a6)
+	mov.l		L_SCR1(%a6),%d6
+	mov.w		%d6,Y(%a6)
+	mov.l		%d4,Y_Hi(%a6)
+	mov.l		%d5,Y_Lo(%a6)
+	fmov.x		R(%a6),%fp0		# no exception
+	mov.b		&1,Sc_Flag(%a6)
+	bra.b		ModOrRem
+No_Scale:
+	mov.l		%d1,R_Hi(%a6)
+	mov.l		%d2,R_Lo(%a6)
+	sub.l		&0x3FFE,%d0
+	mov.w		%d0,R(%a6)
+	mov.l		L_SCR1(%a6),%d6
+	sub.l		&0x3FFE,%d6
+	mov.l		%d6,L_SCR1(%a6)
+	fmov.x		R(%a6),%fp0
+	mov.w		%d6,Y(%a6)
+	mov.l		%d4,Y_Hi(%a6)
+	mov.l		%d5,Y_Lo(%a6)
+	clr.b		Sc_Flag(%a6)
+
+#
+ModOrRem:
+	tst.b		Mod_Flag(%a6)
+	beq.b		Fix_Sign
+
+	mov.l		L_SCR1(%a6),%d6		# new biased expo(Y)
+	subq.l		&1,%d6			# biased expo(Y/2)
+	cmp.l		%d0,%d6
+	blt.b		Fix_Sign
+	bgt.b		Last_Sub
+
+	cmp.l		%d1,%d4
+	bne.b		Not_EQ
+	cmp.l		%d2,%d5
+	bne.b		Not_EQ
+	bra.w		Tie_Case
+
+Not_EQ:
+	bcs.b		Fix_Sign
+
+Last_Sub:
+#
+	fsub.x		Y(%a6),%fp0		# no exceptions
+	addq.l		&1,%d3			# Q := Q + 1
+
+#
+Fix_Sign:
+#..Get sign of X
+	mov.w		SignX(%a6),%d6
+	bge.b		Get_Q
+	fneg.x		%fp0
+
+#..Get Q
+#
+Get_Q:
+	clr.l		%d6
+	mov.w		SignQ(%a6),%d6		# D6 is sign(Q)
+	mov.l		&8,%d7
+	lsr.l		%d7,%d6
+	and.l		&0x0000007F,%d3		# 7 bits of Q
+	or.l		%d6,%d3			# sign and bits of Q
+#	swap		%d3
+#	fmov.l		%fpsr,%d6
+#	and.l		&0xFF00FFFF,%d6
+#	or.l		%d3,%d6
+#	fmov.l		%d6,%fpsr		# put Q in fpsr
+	mov.b		%d3,FPSR_QBYTE(%a6)	# put Q in fpsr
+
+#
+Restore:
+	movm.l		(%sp)+,&0xfc		#  {%d2-%d7}
+	mov.l		(%sp)+,%d0
+	fmov.l		%d0,%fpcr
+	tst.b		Sc_Flag(%a6)
+	beq.b		Finish
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		Scale(%pc),%fp0		# may cause underflow
+	bra		t_catch2
+# the '040 package did this apparently to see if the dst operand for the
+# preceding fmul was a denorm. but, it better not have been since the
+# algorithm just got done playing with fp0 and expected no exceptions
+# as a result. trust me...
+#	bra		t_avoid_unsupp		# check for denorm as a
+#						;result of the scaling
+
+Finish:
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		%fp0,%fp0		# capture exceptions & round
+	bra		t_catch2
+
+Rem_is_0:
+#..R = 2^(-j)X - Q Y = Y, thus R = 0 and quotient = 2^j (Q+1)
+	addq.l		&1,%d3
+	cmp.l		%d0,&8			# D0 is j
+	bge.b		Q_Big
+
+	lsl.l		%d0,%d3
+	bra.b		Set_R_0
+
+Q_Big:
+	clr.l		%d3
+
+Set_R_0:
+	fmov.s		&0x00000000,%fp0
+	clr.b		Sc_Flag(%a6)
+	bra.w		Fix_Sign
+
+Tie_Case:
+#..Check parity of Q
+	mov.l		%d3,%d6
+	and.l		&0x00000001,%d6
+	tst.l		%d6
+	beq.w		Fix_Sign		# Q is even
+
+#..Q is odd, Q := Q + 1, signX := -signX
+	addq.l		&1,%d3
+	mov.w		SignX(%a6),%d6
+	eor.l		&0x00008000,%d6
+	mov.w		%d6,SignX(%a6)
+	bra.w		Fix_Sign
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	tag(): return the optype of the input ext fp number		#
+#									#
+#	This routine is used by the 060FPLSP.				#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, ZERO		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#	If it's an unnormalized zero, alter the operand and force it	#
+# to be a normal zero.							#
+#									#
+#########################################################################
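+
+# the same classification in C (sketch only; ext_t is a hypothetical
+# view of the 96-bit extended operand, and NORM/INF/etc. stand for the
+# package's tag values):
+#
+#	#include <stdint.h>
+#	typedef struct { uint16_t sgn_exp; uint32_t hi, lo; } ext_t;
+#	int tag_sketch(const ext_t *p)
+#	{
+#		uint16_t e = p->sgn_exp & 0x7fff;	/* strip sign    */
+#		if (e == 0x7fff)			/* max exponent? */
+#			return ((p->hi & 0x7fffffff) | p->lo) ? QNAN : INF;
+#		if (p->hi & 0x80000000)			/* j-bit set     */
+#			return NORM;
+#		if (e != 0)				/* unnormalized: */
+#			return UNNORM;			/* gets fixed up */
+#		return (p->hi | p->lo) ? DENORM : ZERO;
+#	}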
+
+	global		tag
+tag:
+	mov.w		FTEMP_EX(%a0), %d0	# extract exponent
+	andi.w		&0x7fff, %d0		# strip off sign
+	cmpi.w		%d0, &0x7fff		# is (EXP == MAX)?
+	beq.b		inf_or_nan_x
+not_inf_or_nan_x:
+	btst		&0x7,FTEMP_HI(%a0)
+	beq.b		not_norm_x
+is_norm_x:
+	mov.b		&NORM, %d0
+	rts
+not_norm_x:
+	tst.w		%d0			# is exponent = 0?
+	bne.b		is_unnorm_x
+not_unnorm_x:
+	tst.l		FTEMP_HI(%a0)
+	bne.b		is_denorm_x
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_denorm_x
+is_zero_x:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_x:
+	mov.b		&DENORM, %d0
+	rts
+is_unnorm_x:
+	bsr.l		unnorm_fix		# convert to norm,denorm,or zero
+	rts
+is_unnorm_reg_x:
+	mov.b		&UNNORM, %d0
+	rts
+inf_or_nan_x:
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_nan_x
+	mov.l		FTEMP_HI(%a0), %d0
+	and.l		&0x7fffffff, %d0	# msb is a don't care!
+	bne.b		is_nan_x
+is_inf_x:
+	mov.b		&INF, %d0
+	rts
+is_nan_x:
+	mov.b		&QNAN, %d0
+	rts
+
+#############################################################
+
+qnan:	long		0x7fff0000, 0xffffffff, 0xffffffff
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_dz(): Handle 060FPLSP dz exception for "flogn" emulation.	#
+#	t_dz2(): Handle 060FPLSP dz exception for "fatanh" emulation.	#
+#									#
+#	These routines are used by the 060FPLSP package.		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand.		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default DZ result.					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Transcendental emulation for the 060FPLSP has detected that	#
+# a DZ exception should occur for the instruction. If DZ is disabled,	#
+# return the default result.						#
+#	If DZ is enabled, the dst operand should be returned unscathed	#
+# in fp0 while fp1 is used to create a DZ exception so that the		#
+# operating system can log that such an event occurred.			#
+#									#
+#########################################################################
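+
+# the same "make it loggable" idea in C terms (sketch only; the package
+# forces the event with a real fdiv on fp1 instead of <fenv.h>):
+#
+#	#include <fenv.h>
+#	#include <math.h>
+#	double dz_sketch(double src)	/* default result, DZ recorded */
+#	{
+#		feraiseexcept(FE_DIVBYZERO);	/* let the OS log the event */
+#		return copysign(INFINITY, src);	/* -INF or +INF per source  */
+#	}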
+
+	global		t_dz
+t_dz:
+	tst.b		SRC_EX(%a0)		# check sign for neg or pos
+	bpl.b		dz_pinf			# branch if pos sign
+
+	global		t_dz2
+t_dz2:
+	ori.l		&dzinf_mask+neg_mask,USER_FPSR(%a6) # set N/I/DZ/ADZ
+
+	btst		&dz_bit,FPCR_ENABLE(%a6)
+	bne.b		dz_minf_ena
+
+# dz is disabled. return a -INF.
+	fmov.s		&0xff800000,%fp0	# return -INF
+	rts
+
+# dz is enabled. create a dz exception so the user can record it
+# but use fp1 instead. return the dst operand unscathed in fp0.
+dz_minf_ena:
+	fmovm.x		EXC_FP0(%a6),&0x80	# return fp0 unscathed
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmov.s		&0xbf800000,%fp1	# load -1
+	fdiv.s		&0x00000000,%fp1	# -1 / 0
+	rts
+
+dz_pinf:
+	ori.l		&dzinf_mask,USER_FPSR(%a6) # set I/DZ/ADZ
+
+	btst		&dz_bit,FPCR_ENABLE(%a6)
+	bne.b		dz_pinf_ena
+
+# dz is disabled. return a +INF.
+	fmov.s		&0x7f800000,%fp0	# return +INF
+	rts
+
+# dz is enabled. create a dz exception so the user can record it
+# but use fp1 instead. return the dst operand unscathed in fp0.
+dz_pinf_ena:
+	fmovm.x		EXC_FP0(%a6),&0x80	# return fp0 unscathed
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmov.s		&0x3f800000,%fp1	# load +1
+	fdiv.s		&0x00000000,%fp1	# +1 / 0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_operr(): Handle 060FPLSP OPERR exception during emulation.	#
+#									#
+#	This routine is used by the 060FPLSP package.			#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	fp1 = source operand						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#	fp1 = unchanged							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	An operand error should occur as the result of transcendental	#
+# emulation in the 060FPLSP. If OPERR is disabled, just return a NAN	#
+# in fp0. If OPERR is enabled, return the dst operand unscathed in fp0	#
+# and the source operand in fp1. Use fp2 to create an OPERR exception	#
+# so that the operating system can log the event.			#
+#									#
+#########################################################################
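+
+# the forced invalid operation in C terms (sketch only; volatile keeps
+# the compiler from folding the INF x 0 away at compile time):
+#
+#	#include <math.h>
+#	volatile double op_inf = INFINITY, op_zero = 0.0;
+#	double operr_sketch(void)
+#	{
+#		return op_inf * op_zero;	/* raises invalid, yields NaN */
+#	}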
+
+	global		t_operr
+t_operr:
+	ori.l		&opnan_mask,USER_FPSR(%a6) # set NAN/OPERR/AIOP
+
+	btst		&operr_bit,FPCR_ENABLE(%a6)
+	bne.b		operr_ena
+
+# operr is disabled. return a QNAN in fp0
+	fmovm.x		qnan(%pc),&0x80		# return QNAN
+	rts
+
+# operr is enabled. create an operr exception so the user can record it
+# but use fp2 instead. return the dst operand unscathed in fp0.
+operr_ena:
+	fmovm.x		EXC_FP0(%a6),&0x80	# return fp0 unscathed
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmovm.x		&0x04,-(%sp)		# save fp2
+	fmov.s		&0x7f800000,%fp2	# load +INF
+	fmul.s		&0x00000000,%fp2	# +INF x 0
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+	rts
+
+pls_huge:
+	long		0x7ffe0000,0xffffffff,0xffffffff
+mns_huge:
+	long		0xfffe0000,0xffffffff,0xffffffff
+pls_tiny:
+	long		0x00000000,0x80000000,0x00000000
+mns_tiny:
+	long		0x80000000,0x80000000,0x00000000
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_unfl(): Handle 060FPLSP underflow exception during emulation.	#
+#	t_unfl2(): Handle 060FPLSP underflow exception during		#
+#	           emulation. result always positive.			#
+#									#
+#	This routine is used by the 060FPLSP package.			#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default underflow result					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	An underflow should occur as the result of transcendental	#
+# emulation in the 060FPLSP. Create an underflow by using "fmul"	#
+# and two very small numbers of appropriate sign so the operating	#
+# system can log the event.						#
+#									#
+#########################################################################
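+
+# the "tiny * tiny" trick in C (sketch only; volatile forces the
+# underflow to happen at run time rather than compile time):
+#
+#	#include <float.h>
+#	volatile double v_tiny = DBL_MIN;
+#	double unfl_sketch(void)
+#	{
+#		return v_tiny * v_tiny;	/* underflows: sets UNFL/INEX */
+#	}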
+
+	global		t_unfl
+t_unfl:
+	tst.b		SRC_EX(%a0)
+	bpl.b		unf_pos
+
+	global		t_unfl2
+t_unfl2:
+	ori.l		&unfinx_mask+neg_mask,USER_FPSR(%a6) # set N/UNFL/INEX2/AUNFL/AINEX
+
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmovm.x		mns_tiny(%pc),&0x80
+	fmul.x		pls_tiny(%pc),%fp0
+
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0
+	mov.b		%d0,FPSR_CC(%a6)
+	rts
+unf_pos:
+	ori.w		&unfinx_mask,FPSR_EXCEPT(%a6) # set UNFL/INEX2/AUNFL/AINEX
+
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmovm.x		pls_tiny(%pc),&0x80
+	fmul.x		%fp0,%fp0
+
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0
+	mov.b		%d0,FPSR_CC(%a6)
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_ovfl(): Handle 060FPLSP overflow exception during emulation.	#
+#		  (monadic)						#
+#	t_ovfl2(): Handle 060FPLSP overflow exception during		#
+#	           emulation. result always positive. (dyadic)		#
+#	t_ovfl_sc(): Handle 060FPLSP overflow exception during		#
+#	             emulation for "fscale".				#
+#									#
+#	This routine is used by the 060FPLSP package.			#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default overflow result					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	An overflow should occur as the result of transcendental	#
+# emulation in the 060FPLSP. Create an overflow by using "fmul"		#
+# and two very large numbers of appropriate sign so the operating	#
+# system can log the event.						#
+#	For t_ovfl_sc() we take special care not to lose the INEX2 bit.	#
+#									#
+#########################################################################
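+
+# and the "huge * huge" counterpart in C (sketch only):
+#
+#	#include <float.h>
+#	volatile double v_huge = DBL_MAX;
+#	double ovfl_sketch(void)
+#	{
+#		return v_huge * v_huge;	/* overflows to +INF: sets OVFL/INEX */
+#	}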
+
+	global		t_ovfl_sc
+t_ovfl_sc:
+	ori.l		&ovfl_inx_mask,USER_FPSR(%a6) # set OVFL/AOVFL/AINEX
+
+	mov.b		%d0,%d1			# fetch rnd prec,mode
+	andi.b		&0xc0,%d1		# extract prec
+	beq.w		ovfl_work
+
+# dst op is a DENORM. we have to normalize the mantissa to see if the
+# result would be inexact for the given precision. make a copy of the
+# dst so we don't screw up the version passed to us.
+	mov.w		LOCAL_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		LOCAL_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		LOCAL_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a0	# pass ptr to FP_SCR0
+	movm.l		&0xc080,-(%sp)		# save d0-d1/a0
+	bsr.l		norm			# normalize mantissa
+	movm.l		(%sp)+,&0x0103		# restore d0-d1/a0
+
+	cmpi.b		%d1,&0x40		# is precision sgl?
+	bne.b		ovfl_sc_dbl		# no; dbl
+ovfl_sc_sgl:
+	tst.l		LOCAL_LO(%a0)		# is lo lw of sgl set?
+	bne.b		ovfl_sc_inx		# yes
+	tst.b		3+LOCAL_HI(%a0)		# is lo byte of hi lw set?
+	bne.b		ovfl_sc_inx		# yes
+	bra.w		ovfl_work		# don't set INEX2
+ovfl_sc_dbl:
+	mov.l		LOCAL_LO(%a0),%d1	# are any of lo 11 bits of
+	andi.l		&0x7ff,%d1		# dbl mantissa set?
+	beq.w		ovfl_work		# no; don't set INEX2
+ovfl_sc_inx:
+	ori.l		&inex2_mask,USER_FPSR(%a6) # set INEX2
+	bra.b		ovfl_work		# continue
+
+	global		t_ovfl
+t_ovfl:
+	ori.w		&ovfinx_mask,FPSR_EXCEPT(%a6) # set OVFL/INEX2/AOVFL/AINEX
+ovfl_work:
+	tst.b		SRC_EX(%a0)
+	bpl.b		ovfl_p
+ovfl_m:
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmovm.x		mns_huge(%pc),&0x80
+	fmul.x		pls_huge(%pc),%fp0
+
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0
+	ori.b		&neg_mask,%d0
+	mov.b		%d0,FPSR_CC(%a6)
+	rts
+ovfl_p:
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmovm.x		pls_huge(%pc),&0x80
+	fmul.x		pls_huge(%pc),%fp0
+
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0
+	mov.b		%d0,FPSR_CC(%a6)
+	rts
+
+	global		t_ovfl2
+t_ovfl2:
+	ori.w		&ovfinx_mask,FPSR_EXCEPT(%a6) # set OVFL/INEX2/AOVFL/AINEX
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmovm.x		pls_huge(%pc),&0x80
+	fmul.x		pls_huge(%pc),%fp0
+
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0
+	mov.b		%d0,FPSR_CC(%a6)
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_catch(): Handle 060FPLSP OVFL,UNFL,or INEX2 exception during	#
+#		   emulation.						#
+#	t_catch2(): Handle 060FPLSP OVFL,UNFL,or INEX2 exception during	#
+#		    emulation.						#
+#									#
+#	These routines are used by the 060FPLSP package.		#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	fp0 = default underflow or overflow result			#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If an overflow or underflow occurred during the last		#
+# instruction of transcendental 060FPLSP emulation, then it has already	#
+# occurred and has been logged. Now we need to see if an inexact	#
+# exception should occur.						#
+#									#
+#########################################################################
+
+	global		t_catch2
+t_catch2:
+	fmov.l		%fpsr,%d0
+	or.l		%d0,USER_FPSR(%a6)
+	bra.b		inx2_work
+
+	global		t_catch
+t_catch:
+	fmov.l		%fpsr,%d0
+	or.l		%d0,USER_FPSR(%a6)
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_inx2(): Handle inexact 060FPLSP exception during emulation.	#
+#	t_pinx2(): Handle inexact 060FPLSP exception for "+" results.	#
+#	t_minx2(): Handle inexact 060FPLSP exception for "-" results.	#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	fp0 = default result						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The last instruction of transcendental emulation for the	#
+# 060FPLSP should be inexact. So, if inexact is enabled, then we create	#
+# the event here by adding a large and very small number together	#
+# so that the operating system can log the event.			#
+#	Must check, too, if the result was zero, in which case we just	#
+# set the FPSR bits and return.						#
+#									#
+#########################################################################
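+# (implementation note: inx2_work_ena below adds pls_tiny, a very small
+# constant defined elsewhere in this package, to +1.0. the sum cannot be
+# represented exactly, so the fadd itself raises the enabled inexact
+# exception.)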
+
+	global		t_inx2
+t_inx2:
+	fblt.w		t_minx2
+	fbeq.w		inx2_zero
+
+	global		t_pinx2
+t_pinx2:
+	ori.w		&inx2a_mask,FPSR_EXCEPT(%a6) # set INEX2/AINEX
+	bra.b		inx2_work
+
+	global		t_minx2
+t_minx2:
+	ori.l		&inx2a_mask+neg_mask,USER_FPSR(%a6)
+
+inx2_work:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
+	bne.b		inx2_work_ena		# yes
+	rts
+inx2_work_ena:
+	fmov.l		USER_FPCR(%a6),%fpcr	# insert user's exceptions
+	fmov.s		&0x3f800000,%fp1	# load +1
+	fadd.x		pls_tiny(%pc),%fp1	# cause exception
+	rts
+
+inx2_zero:
+	mov.b		&z_bmask,FPSR_CC(%a6)
+	ori.w		&inx2a_mask,2+USER_FPSR(%a6) # set INEX2/AINEX
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_extdnrm(): Handle DENORM inputs in 060FPLSP.			#
+#	t_resdnrm(): Handle DENORM inputs in 060FPLSP for "fscale".	#
+#									#
+#	This routine is used by the 060FPLSP package.			#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	For all functions that have a denormalized input and that	#
+# f(x)=x, this is the entry point.					#
+#	DENORM value is moved using "fmove" which triggers an exception	#
+# if enabled so the operating system can log the event.			#
+#									#
+#########################################################################
+
+	global		t_extdnrm
+t_extdnrm:
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmov.x		SRC_EX(%a0),%fp0
+	fmov.l		%fpsr,%d0
+	ori.l		&unfinx_mask,%d0
+	or.l		%d0,USER_FPSR(%a6)
+	rts
+
+	global		t_resdnrm
+t_resdnrm:
+	fmov.l		USER_FPCR(%a6),%fpcr
+	fmov.x		SRC_EX(%a0),%fp0
+	fmov.l		%fpsr,%d0
+	or.l		%d0,USER_FPSR(%a6)
+	rts
+
+##########################################
+
+#
+# sto_cos:
+#	This is used by fsincos library emulation. The correct
+# values are already in fp0 and fp1 so we do nothing here.
+#
+	global		sto_cos
+sto_cos:
+	rts
+
+##########################################
+
+#
+#	dst_qnan --- force result when destination is a NaN
+#
+	global		dst_qnan
+dst_qnan:
+	fmov.x		DST(%a1),%fp0
+	tst.b		DST_EX(%a1)
+	bmi.b		dst_qnan_m
+dst_qnan_p:
+	mov.b		&nan_bmask,FPSR_CC(%a6)
+	rts
+dst_qnan_m:
+	mov.b		&nan_bmask+neg_bmask,FPSR_CC(%a6)
+	rts
+
+#
+#	src_qnan --- force result when source is a NaN
+#
+	global		src_qnan
+src_qnan:
+	fmov.x		SRC(%a0),%fp0
+	tst.b		SRC_EX(%a0)
+	bmi.b		src_qnan_m
+src_qnan_p:
+	mov.b		&nan_bmask,FPSR_CC(%a6)
+	rts
+src_qnan_m:
+	mov.b		&nan_bmask+neg_bmask,FPSR_CC(%a6)
+	rts
+
+##########################################
+
+#
+#	Native instruction support
+#
+#	Some systems may need entry points even for 68060 native
+#	instructions.  These routines are provided for
+#	convenience.
+#
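+#	Note the pattern used by the sgl and dbl routines below: the
+#	dst operand is loaded with the FPCR cleared so the conversion
+#	to extended precision cannot round or trap; the user's FPCR is
+#	then restored so the operation itself rounds and traps per the
+#	caller's control settings.
+#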
+	global		_fadds_
+_fadds_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.s		0x8(%sp),%fp0		# load sgl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fadd.s		0x8(%sp),%fp0		# fadd w/ sgl src
+	rts
+
+	global		_faddd_
+_faddd_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.d		0x8(%sp),%fp0		# load dbl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fadd.d		0xc(%sp),%fp0		# fadd w/ dbl src
+	rts
+
+	global		_faddx_
+_faddx_:
+	fmovm.x		0x4(%sp),&0x80		# load ext dst
+	fadd.x		0x10(%sp),%fp0		# fadd w/ ext src
+	rts
+
+	global		_fsubs_
+_fsubs_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.s		0x8(%sp),%fp0		# load sgl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fsub.s		0x8(%sp),%fp0		# fsub w/ sgl src
+	rts
+
+	global		_fsubd_
+_fsubd_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.d		0x8(%sp),%fp0		# load dbl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fsub.d		0xc(%sp),%fp0		# fsub w/ dbl src
+	rts
+
+	global		_fsubx_
+_fsubx_:
+	fmovm.x		0x4(%sp),&0x80		# load ext dst
+	fsub.x		0x10(%sp),%fp0		# fsub w/ ext src
+	rts
+
+	global		_fmuls_
+_fmuls_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.s		0x8(%sp),%fp0		# load sgl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fmul.s		0x8(%sp),%fp0		# fmul w/ sgl src
+	rts
+
+	global		_fmuld_
+_fmuld_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.d		0x8(%sp),%fp0		# load dbl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fmul.d		0xc(%sp),%fp0		# fmul w/ dbl src
+	rts
+
+	global		_fmulx_
+_fmulx_:
+	fmovm.x		0x4(%sp),&0x80		# load ext dst
+	fmul.x		0x10(%sp),%fp0		# fmul w/ ext src
+	rts
+
+	global		_fdivs_
+_fdivs_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.s		0x8(%sp),%fp0		# load sgl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fdiv.s		0x8(%sp),%fp0		# fdiv w/ sgl src
+	rts
+
+	global		_fdivd_
+_fdivd_:
+	fmov.l		%fpcr,-(%sp)		# save fpcr
+	fmov.l		&0x00000000,%fpcr	# clear fpcr for load
+	fmov.d		0x8(%sp),%fp0		# load dbl dst
+	fmov.l		(%sp)+,%fpcr		# restore fpcr
+	fdiv.d		0xc(%sp),%fp0		# fdiv w/ dbl src
+	rts
+
+	global		_fdivx_
+_fdivx_:
+	fmovm.x		0x4(%sp),&0x80		# load ext dst
+	fdiv.x		0x10(%sp),%fp0		# fdiv w/ ext src
+	rts
+
+	global		_fabss_
+_fabss_:
+	fabs.s		0x4(%sp),%fp0		# fabs w/ sgl src
+	rts
+
+	global		_fabsd_
+_fabsd_:
+	fabs.d		0x4(%sp),%fp0		# fabs w/ dbl src
+	rts
+
+	global		_fabsx_
+_fabsx_:
+	fabs.x		0x4(%sp),%fp0		# fabs w/ ext src
+	rts
+
+	global		_fnegs_
+_fnegs_:
+	fneg.s		0x4(%sp),%fp0		# fneg w/ sgl src
+	rts
+
+	global		_fnegd_
+_fnegd_:
+	fneg.d		0x4(%sp),%fp0		# fneg w/ dbl src
+	rts
+
+	global		_fnegx_
+_fnegx_:
+	fneg.x		0x4(%sp),%fp0		# fneg w/ ext src
+	rts
+
+	global		_fsqrts_
+_fsqrts_:
+	fsqrt.s		0x4(%sp),%fp0		# fsqrt w/ sgl src
+	rts
+
+	global		_fsqrtd_
+_fsqrtd_:
+	fsqrt.d		0x4(%sp),%fp0		# fsqrt w/ dbl src
+	rts
+
+	global		_fsqrtx_
+_fsqrtx_:
+	fsqrt.x		0x4(%sp),%fp0		# fsqrt w/ ext src
+	rts
+
+	global		_fints_
+_fints_:
+	fint.s		0x4(%sp),%fp0		# fint w/ sgl src
+	rts
+
+	global		_fintd_
+_fintd_:
+	fint.d		0x4(%sp),%fp0		# fint w/ dbl src
+	rts
+
+	global		_fintx_
+_fintx_:
+	fint.x		0x4(%sp),%fp0		# fint w/ ext src
+	rts
+
+	global		_fintrzs_
+_fintrzs_:
+	fintrz.s	0x4(%sp),%fp0		# fintrz w/ sgl src
+	rts
+
+	global		_fintrzd_
+_fintrzd_:
+	fintrz.d	0x4(%sp),%fp0		# fintrz w/ dbl src
+	rts
+
+	global		_fintrzx_
+_fintrzx_:
+	fintrz.x	0x4(%sp),%fp0		# fintrz w/ ext src
+	rts
+
+########################################################################
+
+#########################################################################
+# src_zero(): Return signed zero according to sign of src operand.	#
+#########################################################################
+	global		src_zero
+src_zero:
+	tst.b		SRC_EX(%a0)		# get sign of src operand
+	bmi.b		ld_mzero		# if neg, load neg zero
+
+#
+# ld_pzero(): return a positive zero.
+#
+	global		ld_pzero
+ld_pzero:
+	fmov.s		&0x00000000,%fp0	# load +0
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+
+# ld_mzero(): return a negative zero.
+	global		ld_mzero
+ld_mzero:
+	fmov.s		&0x80000000,%fp0	# load -0
+	mov.b		&neg_bmask+z_bmask,FPSR_CC(%a6) # set 'N','Z' ccode bits
+	rts
+
+#########################################################################
+# dst_zero(): Return signed zero according to sign of dst operand.	#
+#########################################################################
+	global		dst_zero
+dst_zero:
+	tst.b		DST_EX(%a1)		# get sign of dst operand
+	bmi.b		ld_mzero		# if neg, load neg zero
+	bra.b		ld_pzero		# load positive zero
+
+#########################################################################
+# src_inf(): Return signed inf according to sign of src operand.	#
+#########################################################################
+	global		src_inf
+src_inf:
+	tst.b		SRC_EX(%a0)		# get sign of src operand
+	bmi.b		ld_minf			# if negative branch
+
+#
+# ld_pinf(): return a positive infinity.
+#
+	global		ld_pinf
+ld_pinf:
+	fmov.s		&0x7f800000,%fp0	# load +INF
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'INF' ccode bit
+	rts
+
+#
+# ld_minf(): return a negative infinity.
+#
+	global		ld_minf
+ld_minf:
+	fmov.s		&0xff800000,%fp0	# load -INF
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+	rts
+
+#########################################################################
+# dst_inf(): Return signed inf according to sign of dst operand.	#
+#########################################################################
+	global		dst_inf
+dst_inf:
+	tst.b		DST_EX(%a1)		# get sign of dst operand
+	bmi.b		ld_minf			# if negative branch
+	bra.b		ld_pinf
+
+	global		szr_inf
+#################################################################
+# szr_inf(): Return +ZERO for a negative src operand or		#
+#	            +INF for a positive src operand.		#
+#	     Routine used for fetox, ftwotox, and ftentox.	#
+#################################################################
+szr_inf:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_pzero
+	bra.b		ld_pinf
+
+#########################################################################
+# sopr_inf(): Return +INF for a positive src operand or			#
+#	      jump to operand error routine for a negative src operand.	#
+#	      Routine used for flogn, flognp1, flog10, and flog2.	#
+#########################################################################
+	global		sopr_inf
+sopr_inf:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.w		t_operr
+	bra.b		ld_pinf
+
+#################################################################
+# setoxm1i(): Return minus one for a negative src operand or	#
+#	      positive infinity for a positive src operand.	#
+#	      Routine used for fetoxm1.				#
+#################################################################
+	global		setoxm1i
+setoxm1i:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_mone
+	bra.b		ld_pinf
+
+#########################################################################
+# src_one(): Return signed one according to sign of src operand.	#
+#########################################################################
+	global		src_one
+src_one:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_mone
+
+#
+# ld_pone(): return positive one.
+#
+	global		ld_pone
+ld_pone:
+	fmov.s		&0x3f800000,%fp0	# load +1
+	clr.b		FPSR_CC(%a6)
+	rts
+
+#
+# ld_mone(): return negative one.
+#
+	global		ld_mone
+ld_mone:
+	fmov.s		&0xbf800000,%fp0	# load -1
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+ppiby2:	long		0x3fff0000, 0xc90fdaa2, 0x2168c235
+mpiby2:	long		0xbfff0000, 0xc90fdaa2, 0x2168c235
+
+#################################################################
+# spi_2(): Return signed PI/2 according to sign of src operand.	#
+#################################################################
+	global		spi_2
+spi_2:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_mpi2
+
+#
+# ld_ppi2(): return positive PI/2.
+#
+	global		ld_ppi2
+ld_ppi2:
+	fmov.l		%d0,%fpcr
+	fmov.x		ppiby2(%pc),%fp0	# load +pi/2
+	bra.w		t_pinx2			# set INEX2
+
+#
+# ld_mpi2(): return negative PI/2.
+#
+	global		ld_mpi2
+ld_mpi2:
+	fmov.l		%d0,%fpcr
+	fmov.x		mpiby2(%pc),%fp0	# load -pi/2
+	bra.w		t_minx2			# set INEX2
+
+####################################################
+# The following routines give support for fsincos. #
+####################################################
+
+#
+# ssincosz(): When the src operand is ZERO, store a one in the
+#	      cosine register and return a ZERO in fp0 w/ the same sign
+#	      as the src operand.
+#
+	global		ssincosz
+ssincosz:
+	fmov.s		&0x3f800000,%fp1
+	tst.b		SRC_EX(%a0)		# test sign
+	bpl.b		sincoszp
+	fmov.s		&0x80000000,%fp0	# return sin result in fp0
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)
+	rts
+sincoszp:
+	fmov.s		&0x00000000,%fp0	# return sin result in fp0
+	mov.b		&z_bmask,FPSR_CC(%a6)
+	rts
+
+#
+# ssincosi(): When the src operand is INF, store a QNAN in the cosine
+#	      register and jump to the operand error routine for negative
+#	      src operands.
+#
+	global		ssincosi
+ssincosi:
+	fmov.x		qnan(%pc),%fp1		# load NAN
+	bra.w		t_operr
+
+#
+# ssincosqnan(): When the src operand is a QNAN, store the QNAN in the cosine
+#		 register and branch to the src QNAN routine.
+#
+	global		ssincosqnan
+ssincosqnan:
+	fmov.x		LOCAL_EX(%a0),%fp1
+	bra.w		src_qnan
+
+########################################################################
+
+	global		smod_sdnrm
+	global		smod_snorm
+smod_sdnrm:
+smod_snorm:
+	mov.b		DTAG(%a6),%d1
+	beq.l		smod
+	cmpi.b		%d1,&ZERO
+	beq.w		smod_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		smod
+	bra.l		dst_qnan
+
+	global		smod_szero
+smod_szero:
+	mov.b		DTAG(%a6),%d1
+	beq.l		t_operr
+	cmpi.b		%d1,&ZERO
+	beq.l		t_operr
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		t_operr
+	bra.l		dst_qnan
+
+	global		smod_sinf
+smod_sinf:
+	mov.b		DTAG(%a6),%d1
+	beq.l		smod_fpn
+	cmpi.b		%d1,&ZERO
+	beq.l		smod_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		smod_fpn
+	bra.l		dst_qnan
+
+smod_zro:
+srem_zro:
+	mov.b		SRC_EX(%a0),%d1		# get src sign
+	mov.b		DST_EX(%a1),%d0		# get dst sign
+	eor.b		%d0,%d1			# get qbyte sign
+	andi.b		&0x80,%d1
+	mov.b		%d1,FPSR_QBYTE(%a6)
+	tst.b		%d0
+	bpl.w		ld_pzero
+	bra.w		ld_mzero
+
+smod_fpn:
+srem_fpn:
+	clr.b		FPSR_QBYTE(%a6)
+	mov.l		%d0,-(%sp)
+	mov.b		SRC_EX(%a0),%d1		# get src sign
+	mov.b		DST_EX(%a1),%d0		# get dst sign
+	eor.b		%d0,%d1			# get qbyte sign
+	andi.b		&0x80,%d1
+	mov.b		%d1,FPSR_QBYTE(%a6)
+	cmpi.b		DTAG(%a6),&DENORM
+	bne.b		smod_nrm
+	lea		DST(%a1),%a0
+	mov.l		(%sp)+,%d0
+	bra		t_resdnrm
+smod_nrm:
+	fmov.l		(%sp)+,%fpcr
+	fmov.x		DST(%a1),%fp0
+	tst.b		DST_EX(%a1)
+	bmi.b		smod_nrm_neg
+	rts
+
+smod_nrm_neg:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' code
+	rts
+
+#########################################################################
+	global		srem_snorm
+	global		srem_sdnrm
+srem_sdnrm:
+srem_snorm:
+	mov.b		DTAG(%a6),%d1
+	beq.l		srem
+	cmpi.b		%d1,&ZERO
+	beq.w		srem_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		srem
+	bra.l		dst_qnan
+
+	global		srem_szero
+srem_szero:
+	mov.b		DTAG(%a6),%d1
+	beq.l		t_operr
+	cmpi.b		%d1,&ZERO
+	beq.l		t_operr
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		t_operr
+	bra.l		dst_qnan
+
+	global		srem_sinf
+srem_sinf:
+	mov.b		DTAG(%a6),%d1
+	beq.w		srem_fpn
+	cmpi.b		%d1,&ZERO
+	beq.w		srem_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		srem_fpn
+	bra.l		dst_qnan
+
+#########################################################################
+
+	global		sscale_snorm
+	global		sscale_sdnrm
+sscale_snorm:
+sscale_sdnrm:
+	mov.b		DTAG(%a6),%d1
+	beq.l		sscale
+	cmpi.b		%d1,&ZERO
+	beq.l		dst_zero
+	cmpi.b		%d1,&INF
+	beq.l		dst_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		sscale
+	bra.l		dst_qnan
+
+	global		sscale_szero
+sscale_szero:
+	mov.b		DTAG(%a6),%d1
+	beq.l		sscale
+	cmpi.b		%d1,&ZERO
+	beq.l		dst_zero
+	cmpi.b		%d1,&INF
+	beq.l		dst_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		sscale
+	bra.l		dst_qnan
+
+	global		sscale_sinf
+sscale_sinf:
+	mov.b		DTAG(%a6),%d1
+	beq.l		t_operr
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		t_operr
+
+########################################################################
+
+	global		sop_sqnan
+sop_sqnan:
+	mov.b		DTAG(%a6),%d1
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		src_qnan
+
+#########################################################################
+# norm(): normalize the mantissa of an extended precision input. the	#
+#	  input operand should not be normalized already.		#
+#									#
+# XDEF ****************************************************************	#
+#	norm()								#
+#									#
+# XREF **************************************************************** #
+#	none								#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer fp extended precision operand to normalize		#
+#									#
+# OUTPUT ************************************************************** #
+#	d0 = number of bit positions the mantissa was shifted		#
+#	a0 = the input operand's mantissa is normalized; the exponent	#
+#	     is unchanged.						#
+#									#
+#########################################################################
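+# worked example (hypothetical input): for hi:lo = 0x00001234:0x56789abc,
+# bfffo counts 19 leading zeroes in hi. hi is shifted left 19 places
+# (0x91a00000), the upper 19 bits of lo (0x2b3c4) are OR'd into its low
+# end giving 0x91a2b3c4, lo becomes 0xd5e00000, and d0 returns 19.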
+	global		norm
+norm:
+	mov.l		%d2, -(%sp)		# create some temp regs
+	mov.l		%d3, -(%sp)
+
+	mov.l		FTEMP_HI(%a0), %d0	# load hi(mantissa)
+	mov.l		FTEMP_LO(%a0), %d1	# load lo(mantissa)
+
+	bfffo		%d0{&0:&32}, %d2	# how many places to shift?
+	beq.b		norm_lo			# hi(man) is all zeroes!
+
+norm_hi:
+	lsl.l		%d2, %d0		# left shift hi(man)
+	bfextu		%d1{&0:%d2}, %d3	# extract lo bits
+
+	or.l		%d3, %d0		# create hi(man)
+	lsl.l		%d2, %d1		# create lo(man)
+
+	mov.l		%d0, FTEMP_HI(%a0)	# store new hi(man)
+	mov.l		%d1, FTEMP_LO(%a0)	# store new lo(man)
+
+	mov.l		%d2, %d0		# return shift amount
+
+	mov.l		(%sp)+, %d3		# restore temp regs
+	mov.l		(%sp)+, %d2
+
+	rts
+
+norm_lo:
+	bfffo		%d1{&0:&32}, %d2	# how many places to shift?
+	lsl.l		%d2, %d1		# shift lo(man)
+	add.l		&32, %d2		# add 32 to shft amount
+
+	mov.l		%d1, FTEMP_HI(%a0)	# store hi(man)
+	clr.l		FTEMP_LO(%a0)		# lo(man) is now zero
+
+	mov.l		%d2, %d0		# return shift amount
+
+	mov.l		(%sp)+, %d3		# restore temp regs
+	mov.l		(%sp)+, %d2
+
+	rts
+
+#########################################################################
+# unnorm_fix(): - changes an UNNORM to one of NORM, DENORM, or ZERO	#
+#		- returns corresponding optype tag			#
+#									#
+# XDEF ****************************************************************	#
+#	unnorm_fix()							#
+#									#
+# XREF **************************************************************** #
+#	norm() - normalize the mantissa					#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to unnormalized extended precision number		#
+#									#
+# OUTPUT ************************************************************** #
+#	d0 = optype tag - is corrected to one of NORM, DENORM, or ZERO	#
+#	a0 = input operand has been converted to a norm, denorm, or	#
+#	     zero; both the exponent and mantissa are changed.		#
+#									#
+#########################################################################
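+# worked example (hypothetical input): an UNNORM with exponent 0x0005
+# whose mantissa has 9 leading zeroes would need 9 shifts to normalize,
+# pushing the exponent below zero. so the mantissa is shifted left only
+# 5 places, the exponent is forced to zero, and the operand is tagged
+# DENORM instead.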
+
+	global		unnorm_fix
+unnorm_fix:
+	bfffo		FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
+	bne.b		unnorm_shift		# hi(man) is not all zeroes
+
+#
+# hi(man) is all zeroes so see if any bits in lo(man) are set
+#
+unnorm_chk_lo:
+	bfffo		FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
+	beq.w		unnorm_zero		# yes
+
+	add.w		&32, %d0		# no; fix shift distance
+
+#
+# d0 = # shifts needed for complete normalization
+#
+unnorm_shift:
+	clr.l		%d1			# clear top word
+	mov.w		FTEMP_EX(%a0), %d1	# extract exponent
+	and.w		&0x7fff, %d1		# strip off sgn
+
+	cmp.w		%d0, %d1		# will denorm push exp < 0?
+	bgt.b		unnorm_nrm_zero		# yes; denorm only until exp = 0
+
+#
+# exponent would not go < 0. therefore, number stays normalized
+#
+	sub.w		%d0, %d1		# shift exponent value
+	mov.w		FTEMP_EX(%a0), %d0	# load old exponent
+	and.w		&0x8000, %d0		# save old sign
+	or.w		%d0, %d1		# {sgn,new exp}
+	mov.w		%d1, FTEMP_EX(%a0)	# insert new exponent
+
+	bsr.l		norm			# normalize UNNORM
+
+	mov.b		&NORM, %d0		# return new optype tag
+	rts
+
+#
+# exponent would go < 0, so only denormalize until exp = 0
+#
+unnorm_nrm_zero:
+	cmp.b		%d1, &32		# is exp <= 32?
+	bgt.b		unnorm_nrm_zero_lrg	# no; go handle large exponent
+
+	bfextu		FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
+	mov.l		%d0, FTEMP_HI(%a0)	# save new hi(man)
+
+	mov.l		FTEMP_LO(%a0), %d0	# fetch old lo(man)
+	lsl.l		%d1, %d0		# extract new lo(man)
+	mov.l		%d0, FTEMP_LO(%a0)	# save new lo(man)
+
+	and.w		&0x8000, FTEMP_EX(%a0)	# set exp = 0
+
+	mov.b		&DENORM, %d0		# return new optype tag
+	rts
+
+#
+# only mantissa bits set are in lo(man)
+#
+unnorm_nrm_zero_lrg:
+	sub.w		&32, %d1		# adjust shft amt by 32
+
+	mov.l		FTEMP_LO(%a0), %d0	# fetch old lo(man)
+	lsl.l		%d1, %d0		# left shift lo(man)
+
+	mov.l		%d0, FTEMP_HI(%a0)	# store new hi(man)
+	clr.l		FTEMP_LO(%a0)		# lo(man) = 0
+
+	and.w		&0x8000, FTEMP_EX(%a0)	# set exp = 0
+
+	mov.b		&DENORM, %d0		# return new optype tag
+	rts
+
+#
+# whole mantissa is zero so this UNNORM is actually a zero
+#
+unnorm_zero:
+	and.w		&0x8000, FTEMP_EX(%a0)	# force exponent to zero
+
+	mov.b		&ZERO, %d0		# fix optype tag
+	rts
diff --git a/arch/m68k/ifpsp060/src/fpsp.S b/arch/m68k/ifpsp060/src/fpsp.S
new file mode 100644
index 0000000..3b597a9
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/fpsp.S
@@ -0,0 +1,24785 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# freal.s:
+#	This file is appended to the top of the 060FPSP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located
+# after _060FPSP_TABLE.
+#	Also, subroutine stubs exist in this file (_fpsp_done for
+# example) that are referenced by the FPSP package itself in order
+# to call a given routine. The stub routine actually performs the
+# callout. The FPSP code does a "bsr" to the stub routine. This
+# extra layer of hierarchy adds a slight performance penalty but
+# it makes the FPSP code easier to read and more maintainable.
+#
+
+set	_off_bsun,	0x00
+set	_off_snan,	0x04
+set	_off_operr,	0x08
+set	_off_ovfl,	0x0c
+set	_off_unfl,	0x10
+set	_off_dz,	0x14
+set	_off_inex,	0x18
+set	_off_fline,	0x1c
+set	_off_fpu_dis,	0x20
+set	_off_trap,	0x24
+set	_off_trace,	0x28
+set	_off_access,	0x2c
+set	_off_done,	0x30
+
+set	_off_imr,	0x40
+set	_off_dmr,	0x44
+set	_off_dmw,	0x48
+set	_off_irw,	0x4c
+set	_off_irl,	0x50
+set	_off_drb,	0x54
+set	_off_drw,	0x58
+set	_off_drl,	0x5c
+set	_off_dwb,	0x60
+set	_off_dww,	0x64
+set	_off_dwl,	0x68
+
+_060FPSP_TABLE:
+
+###############################################################
+
+# Here's the table of ENTRY POINTS for those linking the package.
+	bra.l		_fpsp_snan
+	short		0x0000
+	bra.l		_fpsp_operr
+	short		0x0000
+	bra.l		_fpsp_ovfl
+	short		0x0000
+	bra.l		_fpsp_unfl
+	short		0x0000
+	bra.l		_fpsp_dz
+	short		0x0000
+	bra.l		_fpsp_inex
+	short		0x0000
+	bra.l		_fpsp_fline
+	short		0x0000
+	bra.l		_fpsp_unsupp
+	short		0x0000
+	bra.l		_fpsp_effadd
+	short		0x0000
+
+	space		56
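+# (each entry above is 8 bytes: a 6-byte "bra.l" plus a 2-byte pad. the
+# 56 bytes of padding round the 9-entry branch table up to 0x80 bytes.)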
+
+###############################################################
+	global		_fpsp_done
+_fpsp_done:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_done,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
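+# how the stubs work: the operating system embedding this package
+# supplies a table of call-out offsets, laid out per the _off_* equates
+# above, in the 0x80 bytes just before _060FPSP_TABLE. each stub saves
+# d0, fetches its offset, pushes the absolute call-out address computed
+# from the table base, restores d0 from the copy beneath the pushed
+# address, and finally "rtd &0x4" jumps to the call-out while discarding
+# the saved d0. the stubs below all follow this same pattern.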
+
+	global		_real_ovfl
+_real_ovfl:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_ovfl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_unfl
+_real_unfl:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_unfl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_inex
+_real_inex:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_inex,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_bsun
+_real_bsun:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_bsun,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_operr
+_real_operr:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_operr,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_snan
+_real_snan:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_snan,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_dz
+_real_dz:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dz,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_fline
+_real_fline:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_fline,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_fpu_disabled
+_real_fpu_disabled:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_fpu_dis,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_trap
+_real_trap:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_trap,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_trace
+_real_trace:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_trace,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_access
+_real_access:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_access,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#######################################
+
+	global		_imem_read
+_imem_read:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_imr,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read
+_dmem_read:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dmr,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write
+_dmem_write:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dmw,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_imem_read_word
+_imem_read_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_irw,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_imem_read_long
+_imem_read_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_irl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_byte
+_dmem_read_byte:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_drb,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_word
+_dmem_read_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_drw,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_long
+_dmem_read_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_drl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_byte
+_dmem_write_byte:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dwb,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_word
+_dmem_write_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dww,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_long
+_dmem_write_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dwl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#
+# This file contains a set of define statements for constants
+# in order to promote readability within the corecode itself.
+#
+
+set LOCAL_SIZE,		192			# stack frame size(bytes)
+set LV,			-LOCAL_SIZE		# stack offset
+
+set EXC_SR,		0x4			# stack status register
+set EXC_PC,		0x6			# stack pc
+set EXC_VOFF,		0xa			# stacked vector offset
+set EXC_EA,		0xc			# stacked <ea>
+
+set EXC_FP,		0x0			# frame pointer
+
+set EXC_AREGS,		-68			# offset of all address regs
+set EXC_DREGS,		-100			# offset of all data regs
+set EXC_FPREGS,		-36			# offset of all fp regs
+
+set EXC_A7,		EXC_AREGS+(7*4)		# offset of saved a7
+set OLD_A7,		EXC_AREGS+(6*4)		# extra copy of saved a7
+set EXC_A6,		EXC_AREGS+(6*4)		# offset of saved a6
+set EXC_A5,		EXC_AREGS+(5*4)
+set EXC_A4,		EXC_AREGS+(4*4)
+set EXC_A3,		EXC_AREGS+(3*4)
+set EXC_A2,		EXC_AREGS+(2*4)
+set EXC_A1,		EXC_AREGS+(1*4)
+set EXC_A0,		EXC_AREGS+(0*4)
+set EXC_D7,		EXC_DREGS+(7*4)
+set EXC_D6,		EXC_DREGS+(6*4)
+set EXC_D5,		EXC_DREGS+(5*4)
+set EXC_D4,		EXC_DREGS+(4*4)
+set EXC_D3,		EXC_DREGS+(3*4)
+set EXC_D2,		EXC_DREGS+(2*4)
+set EXC_D1,		EXC_DREGS+(1*4)
+set EXC_D0,		EXC_DREGS+(0*4)
+
+set EXC_FP0,		EXC_FPREGS+(0*12)	# offset of saved fp0
+set EXC_FP1,		EXC_FPREGS+(1*12)	# offset of saved fp1
+set EXC_FP2,		EXC_FPREGS+(2*12)	# offset of saved fp2 (not used)
+
+set FP_SCR1,		LV+80			# fp scratch 1
+set FP_SCR1_EX,		FP_SCR1+0
+set FP_SCR1_SGN,	FP_SCR1+2
+set FP_SCR1_HI,		FP_SCR1+4
+set FP_SCR1_LO,		FP_SCR1+8
+
+set FP_SCR0,		LV+68			# fp scratch 0
+set FP_SCR0_EX,		FP_SCR0+0
+set FP_SCR0_SGN,	FP_SCR0+2
+set FP_SCR0_HI,		FP_SCR0+4
+set FP_SCR0_LO,		FP_SCR0+8
+
+set FP_DST,		LV+56			# fp destination operand
+set FP_DST_EX,		FP_DST+0
+set FP_DST_SGN,		FP_DST+2
+set FP_DST_HI,		FP_DST+4
+set FP_DST_LO,		FP_DST+8
+
+set FP_SRC,		LV+44			# fp source operand
+set FP_SRC_EX,		FP_SRC+0
+set FP_SRC_SGN,		FP_SRC+2
+set FP_SRC_HI,		FP_SRC+4
+set FP_SRC_LO,		FP_SRC+8
+
+set USER_FPIAR,		LV+40			# FP instr address register
+
+set USER_FPSR,		LV+36			# FP status register
+set FPSR_CC,		USER_FPSR+0		# FPSR condition codes
+set	FPSR_QBYTE,	USER_FPSR+1		# FPSR quotient byte
+set FPSR_EXCEPT,	USER_FPSR+2		# FPSR exception status byte
+set FPSR_AEXCEPT,	USER_FPSR+3		# FPSR accrued exception byte
+
+set USER_FPCR,		LV+32			# FP control register
+set FPCR_ENABLE,	USER_FPCR+2		# FPCR exception enable
+set FPCR_MODE,		USER_FPCR+3		# FPCR rounding mode control
+
+set L_SCR3,		LV+28			# integer scratch 3
+set L_SCR2,		LV+24			# integer scratch 2
+set L_SCR1,		LV+20			# integer scratch 1
+
+set STORE_FLG,		LV+19			# flag: operand store (ie. not fcmp/ftst)
+
+set EXC_TEMP2,		LV+24			# temporary space
+set EXC_TEMP,		LV+16			# temporary space
+
+set DTAG,		LV+15			# destination operand type
+set STAG,		LV+14			# source operand type
+
+set SPCOND_FLG,		LV+10			# flag: special case (see below)
+
+set EXC_CC,		LV+8			# saved condition codes
+set EXC_EXTWPTR,	LV+4			# saved current PC (active)
+set EXC_EXTWORD,	LV+2			# saved extension word
+set EXC_CMDREG,		LV+2			# saved extension word
+set EXC_OPWORD,		LV+0			# saved operation word
+
+################################
+
+# Helpful macros
+
+set FTEMP,		0			# offsets within an
+set FTEMP_EX,		0			# extended precision
+set FTEMP_SGN,		2			# value saved in memory.
+set FTEMP_HI,		4
+set FTEMP_LO,		8
+set FTEMP_GRS,		12
+
+set LOCAL,		0			# offsets within an
+set LOCAL_EX,		0			# extended precision
+set LOCAL_SGN,		2			# value saved in memory.
+set LOCAL_HI,		4
+set LOCAL_LO,		8
+set LOCAL_GRS,		12
+
+set DST,		0			# offsets within an
+set DST_EX,		0			# extended precision
+set DST_HI,		4			# value saved in memory.
+set DST_LO,		8
+
+set SRC,		0			# offsets within an
+set SRC_EX,		0			# extended precision
+set SRC_HI,		4			# value saved in memory.
+set SRC_LO,		8
+
+set SGL_LO,		0x3f81			# min sgl prec exponent
+set SGL_HI,		0x407e			# max sgl prec exponent
+set DBL_LO,		0x3c01			# min dbl prec exponent
+set DBL_HI,		0x43fe			# max dbl prec exponent
+set EXT_LO,		0x0			# min ext prec exponent
+set EXT_HI,		0x7ffe			# max ext prec exponent
+
+set EXT_BIAS,		0x3fff			# extended precision bias
+set SGL_BIAS,		0x007f			# single precision bias
+set DBL_BIAS,		0x03ff			# double precision bias
+
+set NORM,		0x00			# operand type for STAG/DTAG
+set ZERO,		0x01			# operand type for STAG/DTAG
+set INF,		0x02			# operand type for STAG/DTAG
+set QNAN,		0x03			# operand type for STAG/DTAG
+set DENORM,		0x04			# operand type for STAG/DTAG
+set SNAN,		0x05			# operand type for STAG/DTAG
+set UNNORM,		0x06			# operand type for STAG/DTAG
+
+##################
+# FPSR/FPCR bits #
+##################
+set neg_bit,		0x3			# negative result
+set z_bit,		0x2			# zero result
+set inf_bit,		0x1			# infinite result
+set nan_bit,		0x0			# NAN result
+
+set q_sn_bit,		0x7			# sign bit of quotient byte
+
+set bsun_bit,		7			# branch on unordered
+set snan_bit,		6			# signalling NAN
+set operr_bit,		5			# operand error
+set ovfl_bit,		4			# overflow
+set unfl_bit,		3			# underflow
+set dz_bit,		2			# divide by zero
+set inex2_bit,		1			# inexact result 2
+set inex1_bit,		0			# inexact result 1
+
+set aiop_bit,		7			# accrued inexact operation bit
+set aovfl_bit,		6			# accrued overflow bit
+set aunfl_bit,		5			# accrued underflow bit
+set adz_bit,		4			# accrued dz bit
+set ainex_bit,		3			# accrued inexact bit
+
+#############################
+# FPSR individual bit masks #
+#############################
+set neg_mask,		0x08000000		# negative bit mask (lw)
+set inf_mask,		0x02000000		# infinity bit mask (lw)
+set z_mask,		0x04000000		# zero bit mask (lw)
+set nan_mask,		0x01000000		# nan bit mask (lw)
+
+set neg_bmask,		0x08			# negative bit mask (byte)
+set inf_bmask,		0x02			# infinity bit mask (byte)
+set z_bmask,		0x04			# zero bit mask (byte)
+set nan_bmask,		0x01			# nan bit mask (byte)
+
+set bsun_mask,		0x00008000		# bsun exception mask
+set snan_mask,		0x00004000		# snan exception mask
+set operr_mask,		0x00002000		# operr exception mask
+set ovfl_mask,		0x00001000		# overflow exception mask
+set unfl_mask,		0x00000800		# underflow exception mask
+set dz_mask,		0x00000400		# dz exception mask
+set inex2_mask,		0x00000200		# inex2 exception mask
+set inex1_mask,		0x00000100		# inex1 exception mask
+
+set aiop_mask,		0x00000080		# accrued illegal operation
+set aovfl_mask,		0x00000040		# accrued overflow
+set aunfl_mask,		0x00000020		# accrued underflow
+set adz_mask,		0x00000010		# accrued divide by zero
+set ainex_mask,		0x00000008		# accrued inexact
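+# (the longword masks above mirror the FPSR layout defined earlier:
+# condition codes in the top byte, exception status in the third byte,
+# accrued bits in the bottom byte. they line up with FPSR_CC,
+# FPSR_EXCEPT, and FPSR_AEXCEPT.)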
+
+######################################
+# FPSR combinations used in the FPSP #
+######################################
+set dzinf_mask,		inf_mask+dz_mask+adz_mask
+set opnan_mask,		nan_mask+operr_mask+aiop_mask
+set	nzi_mask,	0x01ffffff		# clears N, Z, and I
+set unfinx_mask,	unfl_mask+inex2_mask+aunfl_mask+ainex_mask
+set unf2inx_mask,	unfl_mask+inex2_mask+ainex_mask
+set ovfinx_mask,	ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
+set inx1a_mask,		inex1_mask+ainex_mask
+set inx2a_mask,		inex2_mask+ainex_mask
+set snaniop_mask,	nan_mask+snan_mask+aiop_mask
+set snaniop2_mask,	snan_mask+aiop_mask
+set naniop_mask,	nan_mask+aiop_mask
+set neginf_mask,	neg_mask+inf_mask
+set infaiop_mask,	inf_mask+aiop_mask
+set negz_mask,		neg_mask+z_mask
+set opaop_mask,		operr_mask+aiop_mask
+set unfl_inx_mask,	unfl_mask+aunfl_mask+ainex_mask
+set ovfl_inx_mask,	ovfl_mask+aovfl_mask+ainex_mask
+
+#########
+# misc. #
+#########
+set rnd_stky_bit,	29			# stky bit pos in longword
+
+set sign_bit,		0x7			# sign bit
+set signan_bit,		0x6			# signalling nan bit
+
+set sgl_thresh,		0x3f81			# minimum sgl exponent
+set dbl_thresh,		0x3c01			# minimum dbl exponent
+
+set x_mode,		0x0			# extended precision
+set s_mode,		0x4			# single precision
+set d_mode,		0x8			# double precision
+
+set rn_mode,		0x0			# round-to-nearest
+set rz_mode,		0x1			# round-to-zero
+set	rm_mode,	0x2			# round-to-minus-infinity
+set rp_mode,		0x3			# round-to-plus-infinity
+
+set mantissalen,	64			# length of mantissa in bits
+
+set BYTE,		1			# len(byte) == 1 byte
+set WORD,		2			# len(word) == 2 bytes
+set	LONG,		4			# len(longword) == 4 bytes
+
+set BSUN_VEC,		0xc0			# bsun    vector offset
+set INEX_VEC,		0xc4			# inexact vector offset
+set DZ_VEC,		0xc8			# dz      vector offset
+set UNFL_VEC,		0xcc			# unfl    vector offset
+set OPERR_VEC,		0xd0			# operr   vector offset
+set OVFL_VEC,		0xd4			# ovfl    vector offset
+set SNAN_VEC,		0xd8			# snan    vector offset
+
+###########################
+# SPecial CONDition FLaGs #
+###########################
+set ftrapcc_flg,	0x01			# flag bit: ftrapcc exception
+set fbsun_flg,		0x02			# flag bit: bsun exception
+set mia7_flg,		0x04			# flag bit: (a7)+ <ea>
+set mda7_flg,		0x08			# flag bit: -(a7) <ea>
+set fmovm_flg,		0x40			# flag bit: fmovm instruction
+set immed_flg,		0x80			# flag bit: &<data> <ea>
+
+set ftrapcc_bit,	0x0
+set fbsun_bit,		0x1
+set mia7_bit,		0x2
+set mda7_bit,		0x3
+set immed_bit,		0x7
+
+##################################
+# TRANSCENDENTAL "LAST-OP" FLAGS #
+##################################
+set FMUL_OP,		0x0			# fmul instr performed last
+set FDIV_OP,		0x1			# fdiv performed last
+set FADD_OP,		0x2			# fadd performed last
+set FMOV_OP,		0x3			# fmov performed last
+
+#############
+# CONSTANTS #
+#############
+T1:	long		0x40C62D38,0xD3D64634	# 16381 LOG2 LEAD
+T2:	long		0x3D6F90AE,0xB1E75CC7	# 16381 LOG2 TRAIL
+
+PI:	long		0x40000000,0xC90FDAA2,0x2168C235,0x00000000
+PIBY2:	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+TWOBYPI:
+	long		0x3FE45F30,0x6DC9C883
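+# (these are raw images of FP constants: PI above is extended precision
+# with exponent 0x4000 and mantissa 0xc90fdaa22168c235, i.e. twice
+# 1.5707963... = 3.14159265...; TWOBYPI is 2/pi stored as an IEEE double.)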
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_ovfl(): 060FPSP entry point for FP Overflow exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Overflow exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_fpsp_done() - "callout" for 060FPSP exit (all work done!)	#
+#	_real_ovfl() - "callout" for Overflow exception enabled code	#
+#	_real_inex() - "callout" for Inexact exception enabled code	#
+#	_real_trace() - "callout" for Trace exception code		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Ovfl exception stack frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	Overflow Exception enabled:					#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#	Overflow Exception disabled:					#
+#	- The system stack is unchanged					#
+#	- The "exception present" flag in the fsave frame is cleared	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	On the 060, if an FP overflow is present as the result of any	#
+# instruction, the 060 will take an overflow exception whether the	#
+# exception is enabled or disabled in the FPCR. For the disabled case,	#
+# this handler emulates the instruction to determine what the correct	#
+# default result should be for the operation. This default result is	#
+# then stored in either the FP regfile, data regfile, or memory.	#
+# Finally, the handler exits through the "callout" _fpsp_done()		#
+# denoting that no exceptional conditions exist within the machine.	#
+#	If the exception is enabled, then this handler must create the	#
+# exceptional operand and place it in the fsave state frame, and store	#
+# the default result (only if the instruction is opclass 3). For	#
+# exceptions enabled, this handler must exit through the "callout"	#
+# _real_ovfl() so that the operating system enabled overflow handler	#
+# can handle this case.							#
+#	Two other conditions exist. First, if overflow was disabled	#
+# but the inexact exception was enabled, this handler must exit		#
+# through the "callout" _real_inex() regardless of whether the result	#
+# was inexact.								#
+#	Also, in the case of an opclass three instruction where		#
+# overflow was disabled and the trace exception was enabled, this	#
+# handler must exit through the "callout" _real_trace().		#
+#									#
+#########################################################################
+
+	global		_fpsp_ovfl
+_fpsp_ovfl:
+
+#$#	sub.l		&24,%sp			# make room for src/dst
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&0x5,EXC_CMDREG(%a6)	# is instr an fmove out?
+	bne.w		fovfl_out
+
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+# since, I believe, only NORMs and DENORMs can come through here,
+# maybe we can avoid the subroutine call.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# maybe NORM,DENORM
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# that can pass through fpsp_ovfl(). remember that fcmp, ftst, and fsincos
+# will never take this exception.
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		fovfl_extract		# monadic
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fovfl_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+fovfl_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+fovfl_extract:
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+#$#	mov.l		FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
+#$#	mov.l		FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
+#$#	mov.l		FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	andi.l		&0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+# maybe we can make these entry points ONLY the OVFL entry points of each routine.
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
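+# (tbl_unsupp holds longword offsets relative to its own base, so the
+# fetched entry is added back to the table address to form the target.)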
+
+# the operation has been emulated. the result is in fp0.
+# the EXOP, if an exception occurred, is in fp1.
+# we must save the default result regardless of whether
+# traps are enabled or disabled.
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		store_fpreg
+
+# the exceptional possibilities we have left ourselves with are ONLY overflow
+# and inexact. and, the inexact is such that overflow occurred and was disabled
+# but inexact was enabled.
+	btst		&ovfl_bit,FPCR_ENABLE(%a6)
+	bne.b		fovfl_ovfl_on
+
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.b		fovfl_inex_on
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+	bra.l		_fpsp_done
+
+# overflow is enabled AND overflow, of course, occurred. so, we have the EXOP
+# in fp1. now, simply jump to _real_ovfl()!
+fovfl_ovfl_on:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP (fp1) to stack
+
+	mov.w		&0xe005,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_ovfl
+
+# overflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
+# we must jump to real_inex().
+fovfl_inex_on:
+
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP (fp1) to stack
+
+	mov.b		&0xc4,1+EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_inex
+
+########################################################################
+fovfl_out:
+
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+
+# the src operand is definitely a NORM(!), so tag it as such
+	mov.b		&NORM,STAG(%a6)		# set src optype tag
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	bsr.l		fout
+
+	btst		&ovfl_bit,FPCR_ENABLE(%a6)
+	bne.w		fovfl_ovfl_on
+
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.w		fovfl_inex_on
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	beq.l		_fpsp_done		# no
+
+	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
+	bra.l		_real_trace
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_unfl(): 060FPSP entry point for FP Underflow exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Underflow exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_fpsp_done() - "callout" for 060FPSP exit (all work done!)	#
+#	_real_ovfl() - "callout" for Overflow exception enabled code	#
+#	_real_inex() - "callout" for Inexact exception enabled code	#
+#	_real_trace() - "callout" for Trace exception code		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Unfl exception stack frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	Underflow Exception enabled:					#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#	Underflow Exception disabled:					#
+#	- The system stack is unchanged					#
+#	- The "exception present" flag in the fsave frame is cleared	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	On the 060, if an FP underflow is present as the result of any	#
+# instruction, the 060 will take an underflow exception whether the	#
+# exception is enabled or disabled in the FPCR. For the disabled case,	#
+# this handler emulates the instruction to determine what the correct	#
+# default result should be for the operation. This default result is	#
+# then stored in either the FP regfile, data regfile, or memory.	#
+# Finally, the handler exits through the "callout" _fpsp_done()		#
+# denoting that no exceptional conditions exist within the machine.	#
+#	If the exception is enabled, then this handler must create the	#
+# exceptional operand and place it in the fsave state frame, and store	#
+# the default result (only if the instruction is opclass 3). For	#
+# exceptions enabled, this handler must exit through the "callout"	#
+# _real_unfl() so that the operating system enabled underflow handler	#
+# can handle this case.							#
+#	Two other conditions exist. First, if underflow was disabled	#
+# but the inexact exception was enabled and the result was inexact,	#
+# this handler must exit through the "callout" _real_inex().		#
+#	Also, in the case of an opclass three instruction where		#
+# underflow was disabled and the trace exception was enabled, this	#
+# handler must exit through the "callout" _real_trace().		#
+#									#
+#########################################################################
+
+	global		_fpsp_unfl
+_fpsp_unfl:
+
+#$#	sub.l		&24,%sp			# make room for src/dst
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&0x5,EXC_CMDREG(%a6)	# is instr an fmove out?
+	bne.w		funfl_out
+
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# maybe NORM,DENORM
+
+# bit five of the fp ext word separates the monadic and dyadic operations
+# that can pass through fpsp_unfl(). remember that fcmp and ftst
+# will never take this exception.
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is op monadic or dyadic?
+	beq.b		funfl_extract		# monadic
+
+# now, what's left that's not dyadic is fsincos. we can distinguish it
+# from all dyadics by the '0110xxx pattern
+	btst		&0x4,1+EXC_CMDREG(%a6)	# is op an fsincos?
+	bne.b		funfl_extract		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		funfl_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+funfl_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+funfl_extract:
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+#$#	mov.l		FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
+#$#	mov.l		FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
+#$#	mov.l		FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	andi.l		&0x00ff01ff,USER_FPSR(%a6)
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+# maybe we can make these entry points ONLY the UNFL entry points of each routine.
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		store_fpreg
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception. Since this is incorrect, we need to check
+# if our emulation, after re-doing the operation, decided that
+# no underflow was called for. We do these checks only in
+# funfl_{unfl,inex}_on() because w/ both exceptions disabled, this
+# special case will simply exit gracefully with the correct result.
+
+# the exceptional possibilities we have left ourselves with are ONLY underflow
+# and inexact. and, the inexact is such that underflow occurred and was disabled
+# but inexact was enabled.
+	btst		&unfl_bit,FPCR_ENABLE(%a6)
+	bne.b		funfl_unfl_on
+
+funfl_chkinex:
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.b		funfl_inex_on
+
+funfl_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+	bra.l		_fpsp_done
+
+# underflow is enabled AND underflow, of course, occurred. so, we have the EXOP
+# in fp1 (don't forget to save fp0). what to do now?
+# well, we simply have to go to _real_unfl()!
+funfl_unfl_on:
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception. Since this is incorrect, we check here to see
+# if our emulation, after re-doing the operation, decided that
+# no underflow was called for.
+	btst		&unfl_bit,FPSR_EXCEPT(%a6)
+	beq.w		funfl_chkinex
+
+funfl_unfl_on2:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP (fp1) to stack
+
+	mov.w		&0xe003,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_unfl
+
+# underflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
+# we must jump to real_inex().
+funfl_inex_on:
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception.
+# But, whether bogus or not, if inexact is enabled AND it occurred,
+# then we have to branch to real_inex.
+
+	btst		&inex2_bit,FPSR_EXCEPT(%a6)
+	beq.w		funfl_exit
+
+funfl_inex_on2:
+
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to stack
+
+	mov.b		&0xc4,1+EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_inex
+
+#######################################################################
+funfl_out:
+
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+
+# the src operand is definitely a NORM(!), so tag it as such
+	mov.b		&NORM,STAG(%a6)		# set src optype tag
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	bsr.l		fout
+
+	btst		&unfl_bit,FPCR_ENABLE(%a6)
+	bne.w		funfl_unfl_on2
+
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.w		funfl_inex_on2
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	beq.l		_fpsp_done		# no
+
+	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
+	bra.l		_real_trace
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_unsupp(): 060FPSP entry point for FP "Unimplemented	#
+#		        Data Type" exception.				#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Unimplemented Data Type exception in an operating system.	#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_{word,long}() - read instruction word/longword	#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	load_fpn1() - load src operand from FP regfile			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_real_inex() - "callout" to operating system inexact handler	#
+#	_fpsp_done() - "callout" for exit; work all done		#
+#	_real_trace() - "callout" for Trace enabled exception		#
+#	funimp_skew() - adjust fsave src ops to "incorrect" value	#
+#	_real_snan() - "callout" for SNAN exception			#
+#	_real_operr() - "callout" for OPERR exception			#
+#	_real_ovfl() - "callout" for OVFL exception			#
+#	_real_unfl() - "callout" for UNFL exception			#
+#	get_packed() - fetch packed operand from memory			#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the "Unimp Data Type" stk frame	#
+#	- The fsave frame contains the ssrc op (for UNNORM/DENORM)	#
+#									#
+# OUTPUT **************************************************************	#
+#	If Inexact exception (opclass 3):				#
+#	- The system stack is changed to an Inexact exception stk frame	#
+#	If SNAN exception (opclass 3):					#
+#	- The system stack is changed to an SNAN exception stk frame	#
+#	If OPERR exception (opclass 3):					#
+#	- The system stack is changed to an OPERR exception stk frame	#
+#	If OVFL exception (opclass 3):					#
+#	- The system stack is changed to an OVFL exception stk frame	#
+#	If UNFL exception (opclass 3):					#
+#	- The system stack is changed to an UNFL exception stack frame	#
+#	If Trace exception enabled:					#
+#	- The system stack is changed to a Trace exception stack frame	#
+#	Else: (normal case)						#
+#	- Correct result has been stored as appropriate			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Two main instruction types can enter here: (1) DENORM or UNNORM	#
+# unimplemented data types. These can be either opclass 0,2 or 3	#
+# instructions, and (2) PACKED unimplemented data format instructions	#
+# also of opclasses 0,2, or 3.						#
+#	For UNNORM/DENORM opclass 0 and 2, the handler fetches the src	#
+# operand from the fsave state frame and the dst operand (if dyadic)	#
+# from the FP register file. The instruction is then emulated by	#
+# choosing an emulation routine from a table of routines indexed by	#
+# instruction type. Once the instruction has been emulated and result	#
+# saved, then we check to see if any enabled exceptions resulted from	#
+# instruction emulation. If none, then we exit through the "callout"	#
+# _fpsp_done(). If there is an enabled FP exception, then we insert	#
+# this exception into the FPU in the fsave state frame and then exit	#
+# through _fpsp_done().							#
+#	PACKED opclass 0 and 2 is similar in how the instruction is	#
+# emulated and exceptions handled. The differences occur in how the	#
+# handler loads the packed op (by calling get_packed() routine) and	#
+# by the fact that a Trace exception could be pending for PACKED ops.	#
+# If a Trace exception is pending, then the current exception stack	#
+# frame is changed to a Trace exception stack frame and an exit is	#
+# made through _real_trace().						#
+#	For UNNORM/DENORM opclass 3, the actual move out to memory is	#
+# performed by calling the routine fout(). If no exception should occur	#
+# as the result of emulation, then an exit either occurs through	#
+# _fpsp_done() or through _real_trace() if a Trace exception is pending	#
+# (a Trace stack frame must be created here, too). If an FP exception	#
+# should occur, then we must create an exception stack frame of that	#
+# type and jump to either _real_snan(), _real_operr(), _real_inex(),	#
+# _real_unfl(), or _real_ovfl() as appropriate. PACKED opclass 3	#
+# emulation is performed in a similar manner.				#
+#									#
+#########################################################################
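+
+# roughly, in the pseudocode style used elsewhere in this package:
+#
+#	if (opclass == 3) {
+#	    emulate the move out w/ fout(); exit through _fpsp_done() or
+#	    _real_trace(), or build the proper exc stack frame and exit
+#	    through _real_{snan,operr,ovfl,unfl,inex}();
+#	} else if (src format == packed) {
+#	    fetch the operand w/ get_packed(); emulate; exit as above;
+#	} else {	/* UNNORM/DENORM; opclass 0 or 2 */
+#	    emulate w/ the tbl_unsupp routine; store the result; if an
+#	    enabled exc resulted, stuff it into the fsave frame; exit;
+#	}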
+
+#
+# (1) DENORM and UNNORM (unimplemented) data types:
+#
+#				post-instruction
+#				*****************
+#				*      EA	*
+#	 pre-instruction	*		*
+#	*****************	*****************
+#	* 0x0 *  0x0dc  *	* 0x3 *  0x0dc  *
+#	*****************	*****************
+#	*     Next	*	*     Next	*
+#	*      PC	*	*      PC	*
+#	*****************	*****************
+#	*      SR	*	*      SR	*
+#	*****************	*****************
+#
+# (2) PACKED format (unsupported) opclasses two and three:
+#	*****************
+#	*      EA	*
+#	*		*
+#	*****************
+#	* 0x2 *  0x0dc	*
+#	*****************
+#	*     Next	*
+#	*      PC	*
+#	*****************
+#	*      SR	*
+#	*****************
+#
+	global		_fpsp_unsupp
+_fpsp_unsupp:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# save fp state
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode?
+	bne.b		fu_s
+fu_u:
+	mov.l		%usp,%a0		# fetch user stack pointer
+	mov.l		%a0,EXC_A7(%a6)		# save on stack
+	bra.b		fu_cont
+# if the exception is an opclass zero or two unimplemented data type
+# exception, then the a7' calculated here is wrong since it doesn't
+# stack an ea. however, we don't need an a7' for this case anyways.
+fu_s:
+	lea		0x4+EXC_EA(%a6),%a0	# load old a7'
+	mov.l		%a0,EXC_A7(%a6)		# save on stack
+
+fu_cont:
+
+# the FPIAR holds the "current PC" of the faulting instruction
+# the FPIAR should be set correctly for ALL exceptions passing through
+# this point.
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)	# store OPWORD and EXTWORD
+
+############################
+
+	clr.b		SPCOND_FLG(%a6)		# clear special condition flag
+
+# Separate opclass three (fpn-to-mem) ops since they have a different
+# stack frame and protocol.
+	btst		&0x5,EXC_CMDREG(%a6)	# is it an fmove out?
+	bne.w		fu_out			# yes
+
+# Separate packed opclass two instructions.
+	bfextu		EXC_CMDREG(%a6){&0:&6},%d0 # extract opclass,src fmt
+	cmpi.b		%d0,&0x13		# opclass 2, packed src fmt?
+	beq.w		fu_in_pack		# yes
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field
+	andi.l		&0x00ff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+# Opclass two w/ memory-to-fpn operation will have an incorrect extended
+# precision format if the src format was single or double and the
+# source data type was an INF, NAN, DENORM, or UNNORM
+	lea		FP_SRC(%a6),%a0		# pass ptr to input
+	bsr.l		fix_skewed_ops
+
+# we don't know whether the src operand or the dst operand (or both) is the
+# UNNORM or DENORM. call the function that tags the operand type. if the
+# input is an UNNORM, then convert it to a NORM, DENORM, or ZERO.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2			# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+
+fu_op2:
+	mov.b		%d0,STAG(%a6)		# save src optype tag
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# at this point
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		fu_extract		# monadic
+	cmpi.b		1+EXC_CMDREG(%a6),&0x3a	# is operation an ftst?
+	beq.b		fu_extract		# yes, so it's monadic, too
+
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+fu_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+fu_extract:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	bfextu		1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: all dyadic ops
+#	OPERR	: fsqrt(-NORM)
+#	OVFL	: all except ftst,fcmp
+#	UNFL	: all except ftst,fcmp
+#	DZ	: fdiv
+#	INEX2	: all except ftst,fcmp
+#	INEX1	: none (packed doesn't go through here)
+#
+
+# we determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.b		fu_in_ena		# some are enabled
+
+fu_in_cont:
+# fcmp and ftst do not store any result.
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch extension
+	andi.b		&0x38,%d0		# extract bits 3-5
+	cmpi.b		%d0,&0x38		# is instr fcmp or ftst?
+	beq.b		fu_in_exit		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		store_fpreg		# store the result
+
+fu_in_exit:
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	bra.l		_fpsp_done
+
+fu_in_ena:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		fu_in_exc		# there is at least one set
+
+#
+# No exceptions occurred that were also enabled. Now:
+#
+#	if (OVFL && ovfl_disabled && inexact_enabled) {
+#	    branch to _real_inex() (even if the result was exact!);
+#	} else {
+#	    save the result in the proper fp reg (unless the op is fcmp or ftst);
+#	    return;
+#	}
+#
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+	beq.b		fu_in_cont		# no
+
+fu_in_ovflchk:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+	beq.b		fu_in_cont		# no
+	bra.w		fu_in_exc_ovfl		# go insert overflow frame
+
+#
+# An exception occurred and that exception was enabled:
+#
+#	shift enabled exception field into lo byte of d0;
+#	if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
+#	    ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
+#		/*
+#		 * this is the case where we must call _real_inex() now or else
+#		 * there will be no other way to pass it the exceptional operand
+#		 */
+#		call _real_inex();
+#	} else {
+#		restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
+#	}
+#
+fu_in_exc:
+	subi.l		&24,%d0			# fix offset to be 0-7
+	cmpi.b		%d0,&0x6		# is exception INEX? (6)
+	bne.b		fu_in_exc_exit		# no
+
+# the enabled exception was inexact
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
+	bne.w		fu_in_exc_unfl		# yes
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
+	bne.w		fu_in_exc_ovfl		# yes
+
+# here, we insert the correct fsave status value into the fsave frame for the
+# corresponding exception. the operand in the fsave frame should be the original
+# src operand.
+fu_in_exc_exit:
+	mov.l		%d0,-(%sp)		# save d0
+	bsr.l		funimp_skew		# skew sgl or dbl inputs
+	mov.l		(%sp)+,%d0		# restore d0
+
+	mov.w		(tbl_except.b,%pc,%d0.w*2),2+FP_SRC(%a6) # create exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore src op
+
+	unlk		%a6
+
+	bra.l		_fpsp_done
+
+tbl_except:
+	short		0xe000,0xe006,0xe004,0xe005
+	short		0xe003,0xe002,0xe001,0xe001
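+# (entry order above follows the bfffo priority: BSUN, SNAN, OPERR, OVFL,
+#  UNFL, DZ, INEX2, INEX1)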
+
+fu_in_exc_unfl:
+	mov.w		&0x4,%d0
+	bra.b		fu_in_exc_exit
+fu_in_exc_ovfl:
+	mov.w		&0x03,%d0
+	bra.b		fu_in_exc_exit
+
+# If the input operand to this operation was opclass two and a single
+# or double precision denorm, inf, or nan, the operand needs to be
+# "corrected" in order to have the proper equivalent extended precision
+# number.
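+# for example, a sgl DENORM shows up here w/ a biased exponent of $3f80
+# (= $3fff - $7f) rather than its true extended exponent, and a sgl INF/NAN
+# shows up w/ $407f rather than $7fff; the routine below normalizes the
+# mantissa and rebiases the exponent (or forces it to $7fff for INF/NAN).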
+	global		fix_skewed_ops
+fix_skewed_ops:
+	bfextu		EXC_CMDREG(%a6){&0:&6},%d0 # extract opclass,src fmt
+	cmpi.b		%d0,&0x11		# is class = 2 & fmt = sgl?
+	beq.b		fso_sgl			# yes
+	cmpi.b		%d0,&0x15		# is class = 2 & fmt = dbl?
+	beq.b		fso_dbl			# yes
+	rts					# no
+
+fso_sgl:
+	mov.w		LOCAL_EX(%a0),%d0	# fetch src exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	cmpi.w		%d0,&0x3f80		# is |exp| == $3f80?
+	beq.b		fso_sgl_dnrm_zero	# yes
+	cmpi.w		%d0,&0x407f		# no; is |exp| == $407f?
+	beq.b		fso_infnan		# yes
+	rts					# no
+
+fso_sgl_dnrm_zero:
+	andi.l		&0x7fffffff,LOCAL_HI(%a0) # clear j-bit
+	beq.b		fso_zero		# it's a skewed zero
+fso_sgl_dnrm:
+# here, we count on norm not to alter a0...
+	bsr.l		norm			# normalize mantissa
+	neg.w		%d0			# -shft amt
+	addi.w		&0x3f81,%d0		# adjust new exponent
+	andi.w		&0x8000,LOCAL_EX(%a0)	# clear old exponent
+	or.w		%d0,LOCAL_EX(%a0)	# insert new exponent
+	rts
+
+fso_zero:
+	andi.w		&0x8000,LOCAL_EX(%a0)	# clear bogus exponent
+	rts
+
+fso_infnan:
+	andi.b		&0x7f,LOCAL_HI(%a0)	# clear j-bit
+	ori.w		&0x7fff,LOCAL_EX(%a0)	# make exponent = $7fff
+	rts
+
+fso_dbl:
+	mov.w		LOCAL_EX(%a0),%d0	# fetch src exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	cmpi.w		%d0,&0x3c00		# is |exp| == $3c00?
+	beq.b		fso_dbl_dnrm_zero	# yes
+	cmpi.w		%d0,&0x43ff		# no; is |exp| == $43ff?
+	beq.b		fso_infnan		# yes
+	rts					# no
+
+fso_dbl_dnrm_zero:
+	andi.l		&0x7fffffff,LOCAL_HI(%a0) # clear j-bit
+	bne.b		fso_dbl_dnrm		# it's a skewed denorm
+	tst.l		LOCAL_LO(%a0)		# is it a zero?
+	beq.b		fso_zero		# yes
+fso_dbl_dnrm:
+# here, we count on norm not to alter a0...
+	bsr.l		norm			# normalize mantissa
+	neg.w		%d0			# -shft amt
+	addi.w		&0x3c01,%d0		# adjust new exponent
+	andi.w		&0x8000,LOCAL_EX(%a0)	# clear old exponent
+	or.w		%d0,LOCAL_EX(%a0)	# insert new exponent
+	rts
+
+#################################################################
+
+# fmove out took an unimplemented data type exception.
+# the src operand is in FP_SRC. Call _fout() to write out the result and
+# to determine which exceptions, if any, to take.
+fu_out:
+
+# Separate packed move outs from the UNNORM and DENORM move outs.
+	bfextu		EXC_CMDREG(%a6){&3:&3},%d0 # extract dst format
+	cmpi.b		%d0,&0x3		# packed w/ static k-factor?
+	beq.w		fu_out_pack		# yes
+	cmpi.b		%d0,&0x7		# packed w/ dynamic k-factor?
+	beq.w		fu_out_pack		# yes
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field.
+# fmove out doesn't affect ccodes.
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+# the src can ONLY be a DENORM or an UNNORM! so, don't make any big subroutine
+# call here. just figure out what it is...
+	mov.w		FP_SRC_EX(%a6),%d0	# get exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	beq.b		fu_out_denorm		# it's a DENORM
+
+	lea		FP_SRC(%a6),%a0
+	bsr.l		unnorm_fix		# yes; fix it
+
+	mov.b		%d0,STAG(%a6)
+
+	bra.b		fu_out_cont
+fu_out_denorm:
+	mov.b		&DENORM,STAG(%a6)
+fu_out_cont:
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	mov.l		(%a6),EXC_A6(%a6)	# in case a6 changes
+	bsr.l		fout			# call fmove out routine
+
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: none
+#	OPERR	: fmove.{b,w,l} out of large UNNORM
+#	OVFL	: fmove.{s,d}
+#	UNFL	: fmove.{s,d,x}
+#	DZ	: none
+#	INEX2	: all
+#	INEX1	: none (packed doesn't travel through here)
+
+# determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		fu_out_ena		# some are enabled
+
+fu_out_done:
+
+	mov.l		EXC_A6(%a6),(%a6)	# in case a6 changed
+
+# on extended precision opclass three instructions using pre-decrement or
+# post-increment addressing mode, the address register is not updated. if the
+# address register was the stack pointer used from user mode, then let's update
+# it here. if it was used from supervisor mode, then we have to handle this
+# as a special case.
+	btst		&0x5,EXC_SR(%a6)
+	bne.b		fu_out_done_s
+
+	mov.l		EXC_A7(%a6),%a0		# restore a7
+	mov.l		%a0,%usp
+
+fu_out_done_cont:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.b		fu_out_trace		# yes
+
+	bra.l		_fpsp_done
+
+# is the ea mode pre-decrement of the stack pointer from supervisor mode?
+# ("fmov.x fpm,-(a7)") if so,
+fu_out_done_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.b		fu_out_done_cont
+
+# the extended precision result is still in fp0. but, we need to save it
+# somewhere on the stack until we can copy it to its final resting place.
+# here, we're counting on the top of the stack to be the old place-holders
+# for fp0/fp1 which have already been restored. that way, we can write
+# over those destinations with the shifted stack frame.
+	fmovm.x		&0x80,FP_SRC(%a6)	# put answer on stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+	mov.l		LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	btst		&0x7,(%sp)
+	bne.b		fu_out_trace
+
+	bra.l		_fpsp_done
+
+fu_out_ena:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		fu_out_exc		# there is at least one set
+
+# no exceptions were set.
+# if a disabled overflow occurred and inexact was enabled but the result
+# was exact, then a branch to _real_inex() is made.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+	beq.w		fu_out_done		# no
+
+fu_out_ovflchk:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+	beq.w		fu_out_done		# no
+	bra.w		fu_inex			# yes
+
+#
+# The fp move out that took the "Unimplemented Data Type" exception was
+# being traced. Since the stack frames are similar, get the "current" PC
+# from FPIAR and put it in the trace stack frame then jump to _real_trace().
+#
+#		  UNSUPP FRAME		   TRACE FRAME
+#		*****************	*****************
+#		*      EA	*	*    Current	*
+#		*		*	*      PC	*
+#		*****************	*****************
+#		* 0x3 *  0x0dc	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*     Next	*	*     Next	*
+#		*      PC	*	*      PC	*
+#		*****************	*****************
+#		*      SR	*	*      SR	*
+#		*****************	*****************
+#
+fu_out_trace:
+	mov.w		&0x2024,0x6(%sp)
+	fmov.l		%fpiar,0x8(%sp)
+	bra.l		_real_trace
+
+# an exception occurred and that exception was enabled.
+fu_out_exc:
+	subi.l		&24,%d0			# fix offset to be 0-7
+
+# we don't mess with the existing fsave frame. just re-insert it and
+# jump to the "_real_{}()" handler...
+	mov.w		(tbl_fu_out.b,%pc,%d0.w*2),%d0
+	jmp		(tbl_fu_out.b,%pc,%d0.w*1)
+
+	swbeg		&0x8
+tbl_fu_out:
+	short		tbl_fu_out	- tbl_fu_out	# BSUN can't happen
+	short		tbl_fu_out	- tbl_fu_out	# SNAN can't happen
+	short		fu_operr	- tbl_fu_out	# OPERR
+	short		fu_ovfl		- tbl_fu_out	# OVFL
+	short		fu_unfl		- tbl_fu_out	# UNFL
+	short		tbl_fu_out	- tbl_fu_out	# DZ can't happen
+	short		fu_inex		- tbl_fu_out	# INEX2
+	short		tbl_fu_out	- tbl_fu_out	# INEX1 won't make it here
+
+# for snan,operr,ovfl,unfl, src op is still in FP_SRC so just
+# frestore it.
+fu_snan:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d8,EXC_VOFF(%a6)	# vector offset = 0xd8
+	mov.w		&0xe006,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+
+
+	bra.l		_real_snan
+
+fu_operr:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d0,EXC_VOFF(%a6)	# vector offset = 0xd0
+	mov.w		&0xe004,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+
+
+	bra.l		_real_operr
+
+fu_ovfl:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to the stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d4,EXC_VOFF(%a6)	# vector offset = 0xd4
+	mov.w		&0xe005,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)		# restore EXOP
+
+	unlk		%a6
+
+	bra.l		_real_ovfl
+
+# underflow can happen for extended precision. extended precision opclass
+# three instruction exceptions don't update the stack pointer. so, if the
+# exception occurred from user mode, then simply update a7 and exit normally.
+# if the exception occurred from supervisor mode, then check if the <ea> mode
+# was -(a7); if so, the exception stack frame has to be shifted "down".
+fu_unfl:
+	mov.l		EXC_A6(%a6),(%a6)	# restore a6
+
+	btst		&0x5,EXC_SR(%a6)
+	bne.w		fu_unfl_s
+
+	mov.l		EXC_A7(%a6),%a0		# restore a7 whether we need
+	mov.l		%a0,%usp		# to or not...
+
+fu_unfl_cont:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to the stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30cc,EXC_VOFF(%a6)	# vector offset = 0xcc
+	mov.w		&0xe003,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)		# restore EXOP
+
+	unlk		%a6
+
+	bra.l		_real_unfl
+
+fu_unfl_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # was the <ea> mode -(sp)?
+	bne.b		fu_unfl_cont
+
+# the extended precision result is still in fp0. but, we need to save it
+# somewhere on the stack until we can copy it to its final resting place
+# (where the exc frame is currently). make sure it's not at the top of the
+# frame or it will get overwritten when the exc stack frame is shifted "down".
+	fmovm.x		&0x80,FP_SRC(%a6)	# put answer on stack
+	fmovm.x		&0x40,FP_DST(%a6)	# put EXOP on stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30cc,EXC_VOFF(%a6)	# vector offset = 0xcc
+	mov.w		&0xe003,2+FP_DST(%a6)
+
+	frestore	FP_DST(%a6)		# restore EXOP
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+	mov.l		LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	bra.l		_real_unfl
+
+# fmove in and out enter here.
+fu_inex:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to the stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30c4,EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)		# restore EXOP
+
+	unlk		%a6
+
+
+	bra.l		_real_inex
+
+#########################################################################
+#########################################################################
+fu_in_pack:
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field
+	andi.l		&0x00ff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	bsr.l		get_packed		# fetch packed src operand
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	bsr.l		set_tag_x		# set src optype tag
+
+	mov.b		%d0,STAG(%a6)		# save src optype tag
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# at this point
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		fu_extract_p		# monadic
+	cmpi.b		1+EXC_CMDREG(%a6),&0x3a	# is operation an ftst?
+	beq.b		fu_extract_p		# yes, so it's monadic, too
+
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2_done_p		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+fu_op2_done_p:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+fu_extract_p:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	bfextu		1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: all dyadic ops
+#	OPERR	: fsqrt(-NORM)
+#	OVFL	: all except ftst,fcmp
+#	UNFL	: all except ftst,fcmp
+#	DZ	: fdiv
+#	INEX2	: all except ftst,fcmp
+#	INEX1	: all
+#
+
+# we determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		fu_in_ena_p		# some are enabled
+
+fu_in_cont_p:
+# fcmp and ftst do not store any result.
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch extension
+	andi.b		&0x38,%d0		# extract bits 3-5
+	cmpi.b		%d0,&0x38		# is instr fcmp or ftst?
+	beq.b		fu_in_exit_p		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		store_fpreg		# store the result
+
+fu_in_exit_p:
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.w		fu_in_exit_s_p		# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# update user a7
+	mov.l		%a0,%usp
+
+fu_in_exit_cont_p:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel stack frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+# the exception occurred in supervisor mode. check to see if the
+# addressing mode was (a7)+. if so, we'll need to shift the
+# stack frame "up".
+fu_in_exit_s_p:
+	btst		&mia7_bit,SPCOND_FLG(%a6) # was ea mode (a7)+
+	beq.b		fu_in_exit_cont_p	# no
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel stack frame
+
+# shift the stack frame "up". we don't really care about the <ea> field.
+	mov.l		0x4(%sp),0x10(%sp)
+	mov.l		0x0(%sp),0xc(%sp)
+	add.l		&0xc,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+fu_in_ena_p:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled & set
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		fu_in_exc_p		# at least one was set
+
+#
+# No exceptions occurred that were also enabled. Now:
+#
+#	if (OVFL && ovfl_disabled && inexact_enabled) {
+#	    branch to _real_inex() (even if the result was exact!);
+#	} else {
+#	    save the result in the proper fp reg (unless the op is fcmp or ftst);
+#	    return;
+#	}
+#
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+	beq.w		fu_in_cont_p		# no
+
+fu_in_ovflchk_p:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+	beq.w		fu_in_cont_p		# no
+	bra.w		fu_in_exc_ovfl_p	# do _real_inex() now
+
+#
+# An exception occurred and that exception was enabled:
+#
+#	shift enabled exception field into lo byte of d0;
+#	if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
+#	    ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
+#		/*
+#		 * this is the case where we must call _real_inex() now or else
+#		 * there will be no other way to pass it the exceptional operand
+#		 */
+#		call _real_inex();
+#	} else {
+#		restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
+#	}
+#
+fu_in_exc_p:
+	subi.l		&24,%d0			# fix offset to be 0-7
+	cmpi.b		%d0,&0x6		# is exception INEX? (6 or 7)
+	blt.b		fu_in_exc_exit_p	# no
+
+# the enabled exception was inexact
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
+	bne.w		fu_in_exc_unfl_p	# yes
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
+	bne.w		fu_in_exc_ovfl_p	# yes
+
+# here, we insert the correct fsave status value into the fsave frame for the
+# corresponding exception. the operand in the fsave frame should be the original
+# src operand.
+# as a reminder for future predicted pain and agony, we are passing in fsave the
+# "non-skewed" operand for cases of sgl and dbl src INFs,NANs, and DENORMs.
+# this is INCORRECT for enabled SNAN which would give the user the skewed SNAN!!!
+fu_in_exc_exit_p:
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.w		fu_in_exc_exit_s_p	# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# update user a7
+	mov.l		%a0,%usp
+
+fu_in_exc_exit_cont_p:
+	mov.w		(tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore src op
+
+	unlk		%a6
+
+	btst		&0x7,(%sp)		# is trace enabled?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done
+
+tbl_except_p:
+	short		0xe000,0xe006,0xe004,0xe005
+	short		0xe003,0xe002,0xe001,0xe001
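+# (same bfffo priority order as tbl_except: BSUN, SNAN, OPERR, OVFL,
+#  UNFL, DZ, INEX2, INEX1)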
+
+fu_in_exc_ovfl_p:
+	mov.w		&0x3,%d0
+	bra.w		fu_in_exc_exit_p
+
+fu_in_exc_unfl_p:
+	mov.w		&0x4,%d0
+	bra.w		fu_in_exc_exit_p
+
+fu_in_exc_exit_s_p:
+	btst		&mia7_bit,SPCOND_FLG(%a6)
+	beq.b		fu_in_exc_exit_cont_p
+
+	mov.w		(tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore src op
+
+	unlk		%a6			# unravel stack frame
+
+# shift stack frame "up". who cares about <ea> field.
+	mov.l		0x4(%sp),0x10(%sp)
+	mov.l		0x0(%sp),0xc(%sp)
+	add.l		&0xc,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.b		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+#
+# The opclass two PACKED instruction that took an "Unimplemented Data Type"
+# exception was being traced. Get the "current" PC from the FPIAR and put it
+# in the trace stack frame, then jump to _real_trace().
+#
+#		  UNSUPP FRAME		   TRACE FRAME
+#		*****************	*****************
+#		*      EA	*	*    Current	*
+#		*		*	*      PC	*
+#		*****************	*****************
+#		* 0x2 *	0x0dc	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*     Next	*	*     Next	*
+#		*      PC	*	*      PC	*
+#		*****************	*****************
+#		*      SR	*	*      SR	*
+#		*****************	*****************
+fu_trace_p:
+	mov.w		&0x2024,0x6(%sp)
+	fmov.l		%fpiar,0x8(%sp)
+
+	bra.l		_real_trace
+
+#########################################################
+#########################################################
+fu_out_pack:
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field.
+# fmove out doesn't affect ccodes.
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # extract src fp regno
+	bsr.l		load_fpn1		# load src into FP_SRC
+
+# unlike other opclass 3 unimplemented data type exceptions, packed must be
+# able to detect all operand types.
+	lea		FP_SRC(%a6),%a0
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2_p		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+
+fu_op2_p:
+	mov.b		%d0,STAG(%a6)		# save src optype tag
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	mov.l		(%a6),EXC_A6(%a6)	# in case a6 changes
+	bsr.l		fout			# call fmove out routine
+
+# Exceptions in order of precedence:
+#	BSUN	: no
+#	SNAN	: yes
+#	OPERR	: if ((k_factor > +17) || (dec. exp exceeds 3 digits))
+#	OVFL	: no
+#	UNFL	: no
+#	DZ	: no
+#	INEX2	: yes
+#	INEX1	: no
+
+# determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		fu_out_ena_p		# some are enabled
+
+fu_out_exit_p:
+	mov.l		EXC_A6(%a6),(%a6)	# restore a6
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.b		fu_out_exit_s_p		# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# update user a7
+	mov.l		%a0,%usp
+
+fu_out_exit_cont_p:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel stack frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+# the exception occurred in supervisor mode. check to see if the
+# addressing mode was -(a7). if so, we'll need to shift the
+# stack frame "down".
+fu_out_exit_s_p:
+	btst		&mda7_bit,SPCOND_FLG(%a6) # was ea mode -(a7)
+	beq.b		fu_out_exit_cont_p	# no
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	btst		&0x7,(%sp)
+	bne.w		fu_trace_p
+
+	bra.l		_fpsp_done
+
+fu_out_ena_p:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	beq.w		fu_out_exit_p
+
+	mov.l		EXC_A6(%a6),(%a6)	# restore a6
+
+# an exception occurred and that exception was enabled.
+# the only exceptions possible on packed move out are INEX, OPERR, and SNAN.
+fu_out_exc_p:
+	cmpi.b		%d0,&0x1a		# is the exception OPERR?
+	bgt.w		fu_inex_p2		# no; lower priority, so INEX
+	beq.w		fu_operr_p		# yes; it's OPERR
+
+fu_snan_p:
+	btst		&0x5,EXC_SR(%a6)
+	bne.b		fu_snan_s_p
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	bra.w		fu_snan
+
+fu_snan_s_p:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.w		fu_snan
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d8,EXC_VOFF(%a6)	# vector offset = 0xd8
+	mov.w		&0xe006,2+FP_SRC(%a6)	# set fsave status
+
+	frestore	FP_SRC(%a6)		# restore src operand
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+
+	bra.l		_real_snan
+
+fu_operr_p:
+	btst		&0x5,EXC_SR(%a6)
+	bne.w		fu_operr_p_s
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	bra.w		fu_operr
+
+fu_operr_p_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.w		fu_operr
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d0,EXC_VOFF(%a6)	# vector offset = 0xd0
+	mov.w		&0xe004,2+FP_SRC(%a6)	# set fsave status
+
+	frestore	FP_SRC(%a6)		# restore src operand
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+
+	bra.l		_real_operr
+
+fu_inex_p2:
+	btst		&0x5,EXC_SR(%a6)
+	bne.w		fu_inex_s_p2
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	bra.w		fu_inex
+
+fu_inex_s_p2:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.w		fu_inex
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30c4,EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)	# set fsave status
+
+	frestore	FP_SRC(%a6)		# restore src operand
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+
+	bra.l		_real_inex
+
+#########################################################################
+
+#
+# if we're stuffing a source operand back into an fsave frame then we
+# have to make sure that, for single or double source operands, the
+# format stuffed is as weird as the hardware usually makes it.
+#
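+# for example, to re-create the "skewed" sgl format, a DENORM w/ a true
+# exponent <= $3f80 gets its mantissa shifted right by ($3f81 - exp), its
+# j-bit set, and its exponent forced to $3f80 -- the reverse of the
+# fix_skewed_ops() adjustment.
+#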
+	global		funimp_skew
+funimp_skew:
+	bfextu		EXC_EXTWORD(%a6){&3:&3},%d0 # extract src specifier
+	cmpi.b		%d0,&0x1		# was src sgl?
+	beq.b		funimp_skew_sgl		# yes
+	cmpi.b		%d0,&0x5		# was src dbl?
+	beq.b		funimp_skew_dbl		# yes
+	rts
+
+funimp_skew_sgl:
+	mov.w		FP_SRC_EX(%a6),%d0	# fetch DENORM exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	beq.b		funimp_skew_sgl_not
+	cmpi.w		%d0,&0x3f80
+	bgt.b		funimp_skew_sgl_not
+	neg.w		%d0			# make exponent negative
+	addi.w		&0x3f81,%d0		# find amt to shift
+	mov.l		FP_SRC_HI(%a6),%d1	# fetch DENORM hi(man)
+	lsr.l		%d0,%d1			# shift it
+	bset		&31,%d1			# set j-bit
+	mov.l		%d1,FP_SRC_HI(%a6)	# insert new hi(man)
+	andi.w		&0x8000,FP_SRC_EX(%a6)	# clear old exponent
+	ori.w		&0x3f80,FP_SRC_EX(%a6)	# insert new "skewed" exponent
+funimp_skew_sgl_not:
+	rts
+
+funimp_skew_dbl:
+	mov.w		FP_SRC_EX(%a6),%d0	# fetch DENORM exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	beq.b		funimp_skew_dbl_not
+	cmpi.w		%d0,&0x3c00
+	bgt.b		funimp_skew_dbl_not
+
+	tst.b		FP_SRC_EX(%a6)		# make "internal format"
+	smi.b		0x2+FP_SRC(%a6)
+	mov.w		%d0,FP_SRC_EX(%a6)	# insert exponent with cleared sign
+	clr.l		%d0			# clear g,r,s
+	lea		FP_SRC(%a6),%a0		# pass ptr to src op
+	mov.w		&0x3c01,%d1		# pass denorm threshold
+	bsr.l		dnrm_lp			# denorm it
+	mov.w		&0x3c00,%d0		# new exponent
+	tst.b		0x2+FP_SRC(%a6)		# is sign set?
+	beq.b		fss_dbl_denorm_done	# no
+	bset		&15,%d0			# set sign
+fss_dbl_denorm_done:
+	bset		&0x7,FP_SRC_HI(%a6)	# set j-bit
+	mov.w		%d0,FP_SRC_EX(%a6)	# insert new exponent
+funimp_skew_dbl_not:
+	rts
+
+#########################################################################
+	global		_mem_write2
+_mem_write2:
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode?
+	beq.l		_dmem_write		# user; use the normal callout
+	mov.l		0x0(%a0),FP_DST_EX(%a6)	# supervisor; stash the result
+	mov.l		0x4(%a0),FP_DST_HI(%a6)	# in FP_DST so it can be copied
+	mov.l		0x8(%a0),FP_DST_LO(%a6)	# to the stack frame later
+	clr.l		%d1			# return success
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_effadd(): 060FPSP entry point for FP "Unimplemented	#
+#			effective address" exception.			#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Unimplemented Effective Address exception in an operating	#
+#	system.								#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	decbin() - convert packed data to FP binary data		#
+#	_real_fpu_disabled() - "callout" for "FPU disabled" exception	#
+#	_real_access() - "callout" for access error exception		#
+#	_mem_read() - read extended immediate operand from memory	#
+#	_fpsp_done() - "callout" for exit; work all done		#
+#	_real_trace() - "callout" for Trace enabled exception		#
+#	fmovm_dynamic() - emulate dynamic fmovm instruction		#
+#	fmovm_ctrl() - emulate fmovm control instruction		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the "Unimplemented <ea>" stk frame	#
+#									#
+# OUTPUT **************************************************************	#
+#	If access error:						#
+#	- The system stack is changed to an access error stack frame	#
+#	If FPU disabled:						#
+#	- The system stack is changed to an FPU disabled stack frame	#
+#	If Trace exception enabled:					#
+#	- The system stack is changed to a Trace exception stack frame	#
+#	Else: (normal case)						#
+#	- None (correct result has been stored as appropriate)		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This exception handles 3 types of operations:			#
+# (1) FP Instructions using extended precision or packed immediate	#
+#     addressing mode.							#
+# (2) The "fmovm.x" instruction w/ dynamic register specification.	#
+# (3) The "fmovm.l" instruction w/ 2 or 3 control registers.		#
+#									#
+#	For immediate data operations, the data is read in w/ a		#
+# _mem_read() "callout", converted to FP binary (if packed), and used	#
+# as the source operand to the instruction specified by the instruction	#
+# word. If no FP exception should be reported as a result of the	#
+# emulation, then the result is stored to the destination register and	#
+# the handler exits through _fpsp_done(). If an enabled exc has been	#
+# signalled as a result of emulation, then an fsave state frame		#
+# corresponding to the FP exception type must be entered into the 060	#
+# FPU before exiting. In either the enabled or disabled cases, we	#
+# must also check if a Trace exception is pending, in which case, we	#
+# must create a Trace exception stack frame from the current exception	#
+# stack frame. If no Trace is pending, we simply exit through		#
+# _fpsp_done().								#
+#	For "fmovm.x", call the routine fmovm_dynamic() which will	#
+# decode and emulate the instruction. No FP exceptions can be pending	#
+# as a result of this operation emulation. A Trace exception can be	#
+# pending, though, which means the current stack frame must be changed	#
+# to a Trace stack frame and an exit made through _real_trace().	#
+# For the case of "fmovm.x Dn,-(a7)", where the offending instruction	#
+# was executed from supervisor mode, this handler must store the FP	#
+# register file values to the system stack by itself since		#
+# fmovm_dynamic() can't handle this. A normal exit is made through	#
+# fpsp_done().								#
+#	For "fmovm.l", fmovm_ctrl() is used to emulate the instruction.	#
+# Again, a Trace exception may be pending and an exit made through	#
+# _real_trace(). Else, a normal exit is made through _fpsp_done().	#
+#									#
+#	Before any of the above is attempted, it must be checked to	#
+# see if the FPU is disabled. Since the "Unimp <ea>" exception is taken	#
+# before the "FPU disabled" exception, but the "FPU disabled" exception	#
+# has higher priority, we check the disabled bit in the PCR. If set,	#
+# then we must create an 8 word "FPU disabled" exception stack frame	#
+# from the current 4 word exception stack frame. This includes		#
+# reproducing the effective address of the instruction to put on the	#
+# new stack frame.							#
+#									#
+#	In the process of all emulation work, if a _mem_read()		#
+# "callout" returns a failing result indicating an access error, then	#
+# we must create an access error stack frame from the current stack	#
+# frame. This information includes a faulting address and a fault-	#
+# status-longword. These are created within this handler.		#
+#									#
+#########################################################################
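+
+# roughly, in the pseudocode style used elsewhere in this package:
+#
+#	if (FPU disabled) {
+#	    build an "FPU disabled" stack frame; exit through
+#	    _real_fpu_disabled();
+#	} else if (instr == fmovm.l) {
+#	    emulate w/ fmovm_ctrl();
+#	} else if (instr == fmovm.x) {
+#	    emulate w/ fmovm_dynamic();
+#	} else {
+#	    read the immediate operand (decbin() if packed); emulate;
+#	    store the result or stuff the exc frame into the FPU;
+#	}
+#	exit through _fpsp_done(), or _real_trace() if Trace is pending;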
+
+	global		_fpsp_effadd
+_fpsp_effadd:
+
+# This exception type takes priority over the "Line F Emulator"
+# exception. Therefore, the FPU could be disabled when entering here.
+# So, we must check to see if it's disabled and handle that case separately.
+	mov.l		%d0,-(%sp)		# save d0
+	movc		%pcr,%d0		# load proc cr
+	btst		&0x1,%d0		# is FPU disabled?
+	bne.w		iea_disabled		# yes
+	mov.l		(%sp)+,%d0		# restore d0
+
+	link		%a6,&-LOCAL_SIZE	# init stack frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# PC of instruction that took the exception is the PC in the frame
+	mov.l		EXC_PC(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)	# store OPWORD and EXTWORD
+
+#########################################################################
+
+	tst.w		%d0			# is operation fmovem?
+	bmi.w		iea_fmovm		# yes
+
+#
+# here, we will have:
+#	fabs	fdabs	fsabs		facos		fmod
+#	fadd	fdadd	fsadd		fasin		frem
+#	fcmp				fatan		fscale
+#	fdiv	fddiv	fsdiv		fatanh		fsin
+#	fint				fcos		fsincos
+#	fintrz				fcosh		fsinh
+#	fmove	fdmove	fsmove		fetox		ftan
+#	fmul	fdmul	fsmul		fetoxm1		ftanh
+#	fneg	fdneg	fsneg		fgetexp		ftentox
+#	fsgldiv				fgetman		ftwotox
+#	fsglmul				flog10
+#	fsqrt				flog2
+#	fsub	fdsub	fssub		flogn
+#	ftst				flognp1
+# which can all use f<op>.{x,p}
+# so, now it's immediate data extended precision AND PACKED FORMAT!
+#
+iea_op:
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	btst		&0xa,%d0		# is src fmt x or p?
+	bne.b		iea_op_pack		# packed
+
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# pass: ptr to #<data>
+	lea		FP_SRC(%a6),%a1		# pass: ptr to super addr
+	mov.l		&0xc,%d0		# pass: 12 bytes
+	bsr.l		_imem_read		# read extended immediate
+
+	tst.l		%d1			# did ifetch fail?
+	bne.w		iea_iacc		# yes
+
+	bra.b		iea_op_setsrc
+
+iea_op_pack:
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# pass: ptr to #<data>
+	lea		FP_SRC(%a6),%a1		# pass: ptr to super dst
+	mov.l		&0xc,%d0		# pass: 12 bytes
+	bsr.l		_imem_read		# read packed operand
+
+	tst.l		%d1			# did ifetch fail?
+	bne.w		iea_iacc		# yes
+
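+# packed decimal real layout assumed by the checks below (first longword;
+# the remaining two longwords hold the 16 BCD fraction digits):
+#	bit 31     : SM (mantissa sign)
+#	bit 30     : SE (exponent sign)
+#	bits 29-28 : YY (w/ SE, flags INF/NAN)
+#	bits 27-16 : 3-digit BCD exponent
+#	bits  3-0  : single BCD integer digit
+#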
+# The packed operand is an INF or a NAN if the exponent field is all ones.
+	bfextu		FP_SRC(%a6){&1:&15},%d0	# get exp
+	cmpi.w		%d0,&0x7fff		# INF or NAN?
+	beq.b		iea_op_setsrc		# operand is an INF or NAN
+
+# The packed operand is a zero if the mantissa is all zero, else it's
+# a normal packed op.
+	mov.b		3+FP_SRC(%a6),%d0	# get byte 4
+	andi.b		&0x0f,%d0		# clear all but last nybble
+	bne.b		iea_op_gp_not_spec	# not a zero
+	tst.l		FP_SRC_HI(%a6)		# is lw 2 zero?
+	bne.b		iea_op_gp_not_spec	# not a zero
+	tst.l		FP_SRC_LO(%a6)		# is lw 3 zero?
+	beq.b		iea_op_setsrc		# operand is a ZERO
+iea_op_gp_not_spec:
+	lea		FP_SRC(%a6),%a0		# pass: ptr to packed op
+	bsr.l		decbin			# convert to extended
+	fmovm.x		&0x80,FP_SRC(%a6)	# make this the srcop
+
+iea_op_setsrc:
+	addi.l		&0xc,EXC_EXTWPTR(%a6)	# update extension word pointer
+
+# FP_SRC now holds the src operand.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# could be ANYTHING!!!
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		iea_op_getdst		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM/DENORM/ZERO
+	mov.b		%d0,STAG(%a6)		# set new optype tag
+iea_op_getdst:
+	clr.b		STORE_FLG(%a6)		# clear "store result" boolean
+
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		iea_op_extract		# monadic
+	btst		&0x4,1+EXC_CMDREG(%a6)	# is operation fsincos,ftst,fcmp?
+	bne.b		iea_op_spec		# yes
+
+iea_op_loaddst:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
+	bsr.l		load_fpn2		# load dst operand
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,DTAG(%a6)		# could be ANYTHING!!!
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		iea_op_extract		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM/DENORM/ZERO
+	mov.b		%d0,DTAG(%a6)		# set new optype tag
+	bra.b		iea_op_extract
+
+# the operation is fsincos, ftst, or fcmp. only fcmp is dyadic
+iea_op_spec:
+	btst		&0x3,1+EXC_CMDREG(%a6)	# is operation fsincos?
+	beq.b		iea_op_extract		# yes
+# now, we're left with ftst and fcmp. so, first let's tag them so that they don't
+# store a result. then, only fcmp will branch back and pick up a dst operand.
+	st		STORE_FLG(%a6)		# don't store a final result
+	btst		&0x1,1+EXC_CMDREG(%a6)	# is operation fcmp?
+	beq.b		iea_op_loaddst		# yes
+
+iea_op_extract:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass: rnd mode,prec
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	fmov.l		&0x0,%fpcr
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: all operations
+#	OPERR	: all reg-reg or mem-reg operations that can normally operr
+#	OVFL	: same as OPERR
+#	UNFL	: same as OPERR
+#	DZ	: same as OPERR
+#	INEX2	: same as OPERR
+#	INEX1	: all packed immediate operations
+#
+
+# we determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.b		iea_op_ena		# some are enabled
+
+# now, we save the result, unless, of course, the operation was ftst or fcmp.
+# these don't save results.
+iea_op_save:
+	tst.b		STORE_FLG(%a6)		# does this op store a result?
+	bne.b		iea_op_exit1		# exit with no frestore
+
+iea_op_store:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
+	bsr.l		store_fpreg		# store the result
+
+iea_op_exit1:
+	mov.l		EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
+	mov.l		EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel the frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		iea_op_trace		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+iea_op_ena:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled and set
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		iea_op_exc		# at least one was set
+
+# no exception occurred. now, did a disabled, exact overflow occur with inexact
+# enabled? if so, then we have to stuff an overflow frame into the FPU.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+	beq.b		iea_op_save
+
+iea_op_ovfl:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
+	beq.b		iea_op_store		# no
+	bra.b		iea_op_exc_ovfl		# yes
+
+# an enabled exception occurred. we have to insert the exception type back into
+# the machine.
+iea_op_exc:
+	subi.l		&24,%d0			# fix offset to be 0-7
+	cmpi.b		%d0,&0x6		# is exception INEX?
+	bne.b		iea_op_exc_force	# no
+
+# the enabled exception was inexact. so, if it occurs with an overflow
+# or underflow that was disabled, then we have to force an overflow or
+# underflow frame.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+	bne.b		iea_op_exc_ovfl		# yes
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
+	bne.b		iea_op_exc_unfl		# yes
+
+iea_op_exc_force:
+	mov.w		(tbl_iea_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+	bra.b		iea_op_exit2		# exit with frestore
+
+tbl_iea_except:
+	short		0xe002, 0xe006, 0xe004, 0xe005
+	short		0xe003, 0xe002, 0xe001, 0xe001
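+# (bfffo priority order again: BSUN, SNAN, OPERR, OVFL, UNFL, DZ,
+#  INEX2, INEX1)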
+
+iea_op_exc_ovfl:
+	mov.w		&0xe005,2+FP_SRC(%a6)
+	bra.b		iea_op_exit2
+
+iea_op_exc_unfl:
+	mov.w		&0xe003,2+FP_SRC(%a6)
+
+iea_op_exit2:
+	mov.l		EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
+	mov.l		EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore exceptional state
+
+	unlk		%a6			# unravel the frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.b		iea_op_trace		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+#
+# The opclass two instruction that took an "Unimplemented Effective Address"
+# exception was being traced. Get the "current" PC from the FPIAR and put it
+# in the trace stack frame, then jump to _real_trace().
+#
+#		 UNIMP EA FRAME		   TRACE FRAME
+#		*****************	*****************
+#		* 0x0 *  0x0f0	*	*    Current	*
+#		*****************	*      PC	*
+#		*    Current	*	*****************
+#		*      PC	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*      SR	*	*     Next	*
+#		*****************	*      PC	*
+#					*****************
+#					*      SR	*
+#					*****************
+iea_op_trace:
+	mov.l		(%sp),-(%sp)		# shift stack frame "down"
+	mov.w		0x8(%sp),0x4(%sp)
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
+	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
+
+	bra.l		_real_trace
+
+#########################################################################
+iea_fmovm:
+	btst		&14,%d0			# ctrl or data reg
+	beq.w		iea_fmovm_ctrl
+
+iea_fmovm_data:
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode
+	bne.b		iea_fmovm_data_s
+
+iea_fmovm_data_u:
+	mov.l		%usp,%a0
+	mov.l		%a0,EXC_A7(%a6)		# store current a7
+	bsr.l		fmovm_dynamic		# do dynamic fmovm
+	mov.l		EXC_A7(%a6),%a0		# load possibly new a7
+	mov.l		%a0,%usp		# update usp
+	bra.w		iea_fmovm_exit
+
+iea_fmovm_data_s:
+	clr.b		SPCOND_FLG(%a6)
+	lea		0x2+EXC_VOFF(%a6),%a0
+	mov.l		%a0,EXC_A7(%a6)
+	bsr.l		fmovm_dynamic		# do dynamic fmovm
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	beq.w		iea_fmovm_data_predec
+	cmpi.b		SPCOND_FLG(%a6),&mia7_flg
+	bne.w		iea_fmovm_exit
+
+# right now, d0 = the size.
+# the data has been fetched from the supervisor stack, but we have not
+# incremented the stack pointer by the appropriate number of bytes.
+# do it here.
+iea_fmovm_data_postinc:
+	btst		&0x7,EXC_SR(%a6)
+	bne.b		iea_fmovm_data_pi_trace
+
+	mov.w		EXC_SR(%a6),(EXC_SR,%a6,%d0)
+	mov.l		EXC_EXTWPTR(%a6),(EXC_PC,%a6,%d0)
+	mov.w		&0x00f0,(EXC_VOFF,%a6,%d0)
+
+	lea		(EXC_SR,%a6,%d0),%a0
+	mov.l		%a0,EXC_SR(%a6)
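+# (the new frame's address is parked where sp will point after the unlk,
+# so the "mov.l (%sp)+,%sp" below switches sp to the shifted frame.)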
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+	mov.l		(%sp)+,%sp
+	bra.l		_fpsp_done
+
+iea_fmovm_data_pi_trace:
+	mov.w		EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
+	mov.l		EXC_EXTWPTR(%a6),(EXC_PC-0x4,%a6,%d0)
+	mov.w		&0x2024,(EXC_VOFF-0x4,%a6,%d0)
+	mov.l		EXC_PC(%a6),(EXC_VOFF+0x2-0x4,%a6,%d0)
+
+	lea		(EXC_SR-0x4,%a6,%d0),%a0
+	mov.l		%a0,EXC_SR(%a6)
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+	mov.l		(%sp)+,%sp
+	bra.l		_real_trace
+
+# right now, d0 = the size and d1 = the strg.
+iea_fmovm_data_predec:
+	mov.b		%d1,EXC_VOFF(%a6)	# store strg
+	mov.b		%d0,0x1+EXC_VOFF(%a6)	# store size
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.l		(%a6),-(%sp)		# make a copy of a6
+	mov.l		%d0,-(%sp)		# save d0
+	mov.l		%d1,-(%sp)		# save d1
+	mov.l		EXC_EXTWPTR(%a6),-(%sp)	# make a copy of Next PC
+
+	clr.l		%d0
+	mov.b		0x1+EXC_VOFF(%a6),%d0	# fetch size
+	neg.l		%d0			# get negative of size
+
+	btst		&0x7,EXC_SR(%a6)	# is trace enabled?
+	beq.b		iea_fmovm_data_p2
+
+	mov.w		EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
+	mov.l		EXC_PC(%a6),(EXC_VOFF-0x2,%a6,%d0)
+	mov.l		(%sp)+,(EXC_PC-0x4,%a6,%d0)
+	mov.w		&0x2024,(EXC_VOFF-0x4,%a6,%d0)
+
+	pea		(%a6,%d0)		# create final sp
+	bra.b		iea_fmovm_data_p3
+
+iea_fmovm_data_p2:
+	mov.w		EXC_SR(%a6),(EXC_SR,%a6,%d0)
+	mov.l		(%sp)+,(EXC_PC,%a6,%d0)
+	mov.w		&0x00f0,(EXC_VOFF,%a6,%d0)
+
+	pea		(0x4,%a6,%d0)		# create final sp
+
+iea_fmovm_data_p3:
+	clr.l		%d1
+	mov.b		EXC_VOFF(%a6),%d1	# fetch strg
+
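+# strg is the fmovm register select mask (bit 7 = fp0 ... bit 0 = fp7).
+# the unrolled loop below shifts the mask out msb first; each set bit
+# stores its register as a 12-byte extended value and bumps the store
+# offset in d0 by 0xc.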
+	tst.b		%d1
+	bpl.b		fm_1
+	fmovm.x		&0x80,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_1:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_2
+	fmovm.x		&0x40,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_2:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_3
+	fmovm.x		&0x20,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_3:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_4
+	fmovm.x		&0x10,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_4:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_5
+	fmovm.x		&0x08,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_5:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_6
+	fmovm.x		&0x04,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_6:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_7
+	fmovm.x		&0x02,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_7:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_end
+	fmovm.x		&0x01,(0x4+0x8,%a6,%d0)
+fm_end:
+	mov.l		0x4(%sp),%d1
+	mov.l		0x8(%sp),%d0
+	mov.l		0xc(%sp),%a6
+	mov.l		(%sp)+,%sp
+
+	btst		&0x7,(%sp)		# is trace enabled?
+	beq.l		_fpsp_done
+	bra.l		_real_trace
+
+#########################################################################
+iea_fmovm_ctrl:
+
+	bsr.l		fmovm_ctrl		# load ctrl regs
+
+iea_fmovm_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	btst		&0x7,EXC_SR(%a6)	# is trace on?
+	bne.b		iea_fmovm_trace		# yes
+
+	mov.l		EXC_EXTWPTR(%a6),EXC_PC(%a6) # set Next PC
+
+	unlk		%a6			# unravel the frame
+
+	bra.l		_fpsp_done		# exit to os
+
+#
+# The control reg instruction that took an "Unimplemented Effective Address"
+# exception was being traced. The "Current PC" for the trace frame is the
+# PC stacked for Unimp EA. The "Next PC" is in EXC_EXTWPTR.
+# After fixing the stack frame, jump to _real_trace().
+#
+#		 UNIMP EA FRAME		   TRACE FRAME
+#		*****************	*****************
+#		* 0x0 *  0x0f0	*	*    Current	*
+#		*****************	*      PC	*
+#		*    Current	*	*****************
+#		*      PC	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*      SR	*	*     Next	*
+#		*****************	*      PC	*
+#					*****************
+#					*      SR	*
+#					*****************
+# this ain't a pretty solution, but it works:
+# -restore a6 (not with unlk)
+# -shift stack frame down over where old a6 used to be
+# -add LOCAL_SIZE to stack pointer
+iea_fmovm_trace:
+	mov.l		(%a6),%a6		# restore frame pointer
+	mov.w		EXC_SR+LOCAL_SIZE(%sp),0x0+LOCAL_SIZE(%sp)
+	mov.l		EXC_PC+LOCAL_SIZE(%sp),0x8+LOCAL_SIZE(%sp)
+	mov.l		EXC_EXTWPTR+LOCAL_SIZE(%sp),0x2+LOCAL_SIZE(%sp)
+	mov.w		&0x2024,0x6+LOCAL_SIZE(%sp) # stk fmt = 0x2; voff = 0x024
+	add.l		&LOCAL_SIZE,%sp		# clear stack frame
+
+	bra.l		_real_trace
+
+#########################################################################
+# The FPU is disabled and so we should really have taken the "Line
+# F Emulator" exception. So, here we create an 8-word stack frame
+# from our 4-word stack frame. This means we must calculate the length
+# of the faulting instruction to get the "next PC". This is trivial for
+# immediate operands but requires some extra work for fmovm dynamic
+# which can use most addressing modes.
+iea_disabled:
+	mov.l		(%sp)+,%d0		# restore d0
+
+	link		%a6,&-LOCAL_SIZE	# init stack frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+
+# PC of instruction that took the exception is the PC in the frame
+	mov.l		EXC_PC(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)	# store OPWORD and EXTWORD
+
+	tst.w		%d0			# is instr fmovm?
+	bmi.b		iea_dis_fmovm		# yes
+# instruction is using an extended precision immediate operand. therefore,
+# the total instruction length is 16 bytes.
+iea_dis_immed:
+	mov.l		&0x10,%d0		# 16 bytes of instruction
+	bra.b		iea_dis_cont
+iea_dis_fmovm:
+	btst		&0xe,%d0		# is instr fmovm ctrl?
+	bne.b		iea_dis_fmovm_data	# no; it's fmovm data
+# the instruction is a fmovm.l with 2 or 3 registers.
+	bfextu		%d0{&19:&3},%d1
+	mov.l		&0xc,%d0
+	cmpi.b		%d1,&0x7		# move all regs?
+	bne.b		iea_dis_cont
+	addq.l		&0x4,%d0
+	bra.b		iea_dis_cont
+# the instruction is an fmovm.x dynamic which can use many addressing
+# modes and thus can have several different total instruction lengths.
+# call fmovm_calc_ea which will go through the ea calc process and,
+# as a by-product, will tell us how long the instruction is.
+iea_dis_fmovm_data:
+	clr.l		%d0
+	bsr.l		fmovm_calc_ea
+	mov.l		EXC_EXTWPTR(%a6),%d0
+	sub.l		EXC_PC(%a6),%d0
+iea_dis_cont:
+	mov.w		%d0,EXC_VOFF(%a6)	# store stack shift value
+
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+# here, we actually create the 8-word frame from the 4-word frame,
+# with the "next PC" as additional info.
+# the <ea> field is left undefined.
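+#
+# the 8-word frame built below:
+#
+#	*****************
+#	*    Current	* => PC of the instr that was disabled
+#	*      PC	*
+#	*****************
+#	*		*
+#	-      EA	- => left undefined
+#	*		*
+#	*****************
+#	* 0x4 *  0x02c	* => frame format and vector offset
+#	*****************
+#	*    Next PC	*
+#	*****************
+#	*      SR	*
+#	*****************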
+	subq.l		&0x8,%sp		# make room for new stack
+	mov.l		%d0,-(%sp)		# save d0
+	mov.w		0xc(%sp),0x4(%sp)	# move SR
+	mov.l		0xe(%sp),0x6(%sp)	# move Current PC
+	clr.l		%d0
+	mov.w		0x12(%sp),%d0
+	mov.l		0x6(%sp),0x10(%sp)	# move Current PC
+	add.l		%d0,0x6(%sp)		# make Next PC
+	mov.w		&0x402c,0xa(%sp)	# insert offset,frame format
+	mov.l		(%sp)+,%d0		# restore d0
+
+	bra.l		_real_fpu_disabled
+
+##########
+
+iea_iacc:
+	movc		%pcr,%d0
+	btst		&0x1,%d0
+	bne.b		iea_iacc_cont
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1 on stack
+iea_iacc_cont:
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	subq.w		&0x8,%sp		# make stack frame bigger
+	mov.l		0x8(%sp),(%sp)		# store SR,hi(PC)
+	mov.w		0xc(%sp),0x4(%sp)	# store lo(PC)
+	mov.w		&0x4008,0x6(%sp)	# store voff
+	mov.l		0x2(%sp),0x8(%sp)	# store ea
+	mov.l		&0x09428001,0xc(%sp)	# store fslw
+
+iea_acc_done:
+	btst		&0x5,(%sp)		# user or supervisor mode?
+	beq.b		iea_acc_done2		# user
+	bset		&0x2,0xd(%sp)		# set supervisor TM bit
+
+iea_acc_done2:
+	bra.l		_real_access
+
+iea_dacc:
+	lea		-LOCAL_SIZE(%a6),%sp
+
+	movc		%pcr,%d1
+	btst		&0x1,%d1
+	bne.b		iea_dacc_cont
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1 on stack
+	fmovm.l		LOCAL_SIZE+USER_FPCR(%sp),%fpcr,%fpsr,%fpiar # restore ctrl regs
+iea_dacc_cont:
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		0x4+LOCAL_SIZE(%sp),-0x8+0x4+LOCAL_SIZE(%sp)
+	mov.w		0x8+LOCAL_SIZE(%sp),-0x8+0x8+LOCAL_SIZE(%sp)
+	mov.w		&0x4008,-0x8+0xa+LOCAL_SIZE(%sp)
+	mov.l		%a0,-0x8+0xc+LOCAL_SIZE(%sp)
+	mov.w		%d0,-0x8+0x10+LOCAL_SIZE(%sp)
+	mov.w		&0x0001,-0x8+0x12+LOCAL_SIZE(%sp)
+
+	movm.l		LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1
+	add.w		&LOCAL_SIZE-0x4,%sp
+
+	bra.b		iea_acc_done
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_operr(): 060FPSP entry point for FP Operr exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Operand Error exception in an operating system.		#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	_real_operr() - "callout" to operating system operr handler	#
+#	_dmem_write_{byte,word,long}() - store data to mem (opclass 3)	#
+#	store_dreg_{b,w,l}() - store data to data regfile (opclass 3)	#
+#	facc_out_{b,w,l}() - store to memory took access error (opcl 3)	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Operr exception frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	No access error:						#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the FP Operr exception is enabled, the goal	#
+# is to get to the handler specified at _real_operr(). But, on the 060,	#
+# for opclass zero and two instructions taking this exception, the	#
+# input operand in the fsave frame may be incorrect for some cases	#
+# and needs to be corrected. This handler calls fix_skewed_ops() to	#
+# do just this and then exits through _real_operr().			#
+#	For opclass 3 instructions, the 060 doesn't store the default	#
+# operr result out to memory or data register file as it should.	#
+# This code must emulate the move out before finally exiting through	#
+# _real_operr(). The move out, if to memory, is performed using	#
+# _mem_write() "callout" routines that may return a failing result.	#
+# In this special case, the handler must exit through facc_out()	#
+# which creates an access error stack frame from the current operr	#
+# stack frame.								#
+#									#
+#########################################################################
+
+	global		_fpsp_operr
+_fpsp_operr:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&13,%d0			# is instr an fmove out?
+	bne.b		foperr_out		# fmove out
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source infinity or
+# denorm operand in the sgl or dbl format. NANs also become skewed, but can't
+# cause an operr so we don't need to check for them here.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+foperr_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_operr
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# operand error exceptions. we do this here before passing control to
+# the user operand error handler.
+#
+# byte, word, and long destination format operations can pass
+# through here. we simply need to test the sign of the src
+# operand and save the appropriate minimum or maximum integer value
+# to the effective address as pointed to by the stacked effective address.
+#
+# although packed opclass three operations can take operand error
+# exceptions, they won't pass through here since they are caught
+# first by the unsupported data format exception handler. that handler
+# sends them directly to _real_operr() if necessary.
+#
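+# for example, an operr on an "fmove.w" out with a positive (non-NAN)
+# source stores 0x7fff; with a negative source, 0x8000. since L_SCR1 is
+# written below as a longword (0x7fffffff or 0x80000000), the byte and
+# word cases simply read its most significant byte(s).
+#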
+foperr_out:
+
+	mov.w		FP_SRC_EX(%a6),%d1	# fetch exponent
+	andi.w		&0x7fff,%d1
+	cmpi.w		%d1,&0x7fff
+	bne.b		foperr_out_not_qnan
+# the operand is either an infinity or a QNAN.
+	tst.l		FP_SRC_LO(%a6)
+	bne.b		foperr_out_qnan
+	mov.l		FP_SRC_HI(%a6),%d1
+	andi.l		&0x7fffffff,%d1
+	beq.b		foperr_out_not_qnan
+foperr_out_qnan:
+	mov.l		FP_SRC_HI(%a6),L_SCR1(%a6)
+	bra.b		foperr_out_jmp
+
+foperr_out_not_qnan:
+	mov.l		&0x7fffffff,%d1
+	tst.b		FP_SRC_EX(%a6)
+	bpl.b		foperr_out_not_qnan2
+	addq.l		&0x1,%d1
+foperr_out_not_qnan2:
+	mov.l		%d1,L_SCR1(%a6)
+
+foperr_out_jmp:
+	bfextu		%d0{&19:&3},%d0		# extract dst format field
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract <ea> mode,reg
+	mov.w		(tbl_operr.b,%pc,%d0.w*2),%a0
+	jmp		(tbl_operr.b,%pc,%a0)
+
+tbl_operr:
+	short		foperr_out_l - tbl_operr # long word integer
+	short		tbl_operr    - tbl_operr # sgl prec shouldn't happen
+	short		tbl_operr    - tbl_operr # ext prec shouldn't happen
+	short		foperr_exit  - tbl_operr # packed won't enter here
+	short		foperr_out_w - tbl_operr # word integer
+	short		tbl_operr    - tbl_operr # dbl prec shouldn't happen
+	short		foperr_out_b - tbl_operr # byte integer
+	short		tbl_operr    - tbl_operr # packed won't enter here
+
+foperr_out_b:
+	mov.b		L_SCR1(%a6),%d0		# load positive default result
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		foperr_out_b_save_dn	# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_byte	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_b		# yes
+
+	bra.w		foperr_exit
+foperr_out_b_save_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_b		# store result to regfile
+	bra.w		foperr_exit
+
+foperr_out_w:
+	mov.w		L_SCR1(%a6),%d0		# load positive default result
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		foperr_out_w_save_dn	# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_word	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_w		# yes
+
+	bra.w		foperr_exit
+foperr_out_w_save_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_w		# store result to regfile
+	bra.w		foperr_exit
+
+foperr_out_l:
+	mov.l		L_SCR1(%a6),%d0		# load positive default result
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		foperr_out_l_save_dn	# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_long	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.w		foperr_exit
+foperr_out_l_save_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_l		# store result to regfile
+	bra.w		foperr_exit
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_snan(): 060FPSP entry point for FP SNAN exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Signalling NAN exception in an operating system.		#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	_real_snan() - "callout" to operating system SNAN handler	#
+#	_dmem_write_{byte,word,long}() - store data to mem (opclass 3)	#
+#	store_dreg_{b,w,l}() - store data to data regfile (opclass 3)	#
+#	facc_out_{b,w,l,d,x}() - store to mem took acc error (opcl 3)	#
+#	_calc_ea_fout() - fix An if <ea> is -() or ()+; also get <ea>	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP SNAN exception frame		#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	No access error:						#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the FP SNAN exception is enabled, the goal	#
+# is to get to the handler specified at _real_snan(). But, on the 060,	#
+# for opclass zero and two instructions taking this exception, the	#
+# input operand in the fsave frame may be incorrect for some cases	#
+# and needs to be corrected. This handler calls fix_skewed_ops() to	#
+# do just this and then exits through _real_snan().			#
+#	For opclass 3 instructions, the 060 doesn't store the default	#
+# SNAN result out to memory or data register file as it should.		#
+# This code must emulate the move out before finally exiting through	#
+# _real_snan(). The move out, if to memory, is performed using		#
+# _mem_write() "callout" routines that may return a failing result.	#
+# In this special case, the handler must exit through facc_out()	#
+# which creates an access error stack frame from the current SNAN	#
+# stack frame.								#
+#	For the case of an extended precision opclass 3 instruction,	#
+# if the effective addressing mode was -() or ()+, then the address	#
+# register must get updated by calling _calc_ea_fout(). If the <ea>	#
+# was -(a7) from supervisor mode, then the exception frame currently	#
+# on the system stack must be carefully moved "down" to make room	#
+# for the operand being moved.						#
+#									#
+#########################################################################
+
+	global		_fpsp_snan
+_fpsp_snan:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&13,%d0			# is instr an fmove out?
+	bne.w		fsnan_out		# fmove out
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source infinity or
+# denorm operand in the sgl or dbl format. NANs also become skewed and must be
+# fixed here.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+fsnan_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_snan
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# snan exceptions. we do this here before passing control to
+# the user snan handler.
+#
+# byte, word, long, sgl, dbl, ext, and packed destination format operations
+# can pass through here. since packed format operations were already handled
+# by fpsp_unsupp(), we need to do nothing else for them here.
+# for byte, word, and long, we simply need to test the sign of the src
+# operand and save the appropriate minimum or maximum integer value
+# to the effective address as pointed to by the stacked effective address.
+#
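+# unlike the operr case above, the default result here is built from the
+# SNAN itself: the most significant mantissa bits that fit the destination,
+# with the quiet (snan) bit forced to one. e.g., for a byte destination,
+# FP_SRC_HI = 0x12xxxxxx produces the byte 0x52.
+#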
+fsnan_out:
+
+	bfextu		%d0{&19:&3},%d0		# extract dst format field
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract <ea> mode,reg
+	mov.w		(tbl_snan.b,%pc,%d0.w*2),%a0
+	jmp		(tbl_snan.b,%pc,%a0)
+
+tbl_snan:
+	short		fsnan_out_l - tbl_snan # long word integer
+	short		fsnan_out_s - tbl_snan # sgl prec
+	short		fsnan_out_x - tbl_snan # ext prec
+	short		tbl_snan    - tbl_snan # packed needs no help
+	short		fsnan_out_w - tbl_snan # word integer
+	short		fsnan_out_d - tbl_snan # dbl prec
+	short		fsnan_out_b - tbl_snan # byte integer
+	short		tbl_snan    - tbl_snan # packed needs no help
+
+fsnan_out_b:
+	mov.b		FP_SRC_HI(%a6),%d0	# load upper byte of SNAN
+	bset		&6,%d0			# set SNAN bit
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_b_dn		# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_byte	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_b		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_b_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_b		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_w:
+	mov.w		FP_SRC_HI(%a6),%d0	# load upper word of SNAN
+	bset		&14,%d0			# set SNAN bit
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_w_dn		# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_word	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_w		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_w_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_w		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_l:
+	mov.l		FP_SRC_HI(%a6),%d0	# load upper longword of SNAN
+	bset		&30,%d0			# set SNAN bit
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_l_dn		# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_long	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_l_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_l		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_s:
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_d_dn		# yes
+	mov.l		FP_SRC_EX(%a6),%d0	# fetch SNAN sign
+	andi.l		&0x80000000,%d0		# keep sign
+	ori.l		&0x7fc00000,%d0		# insert new exponent,SNAN bit
+	mov.l		FP_SRC_HI(%a6),%d1	# load mantissa
+	lsr.l		&0x8,%d1		# shift mantissa for sgl
+	or.l		%d1,%d0			# create sgl SNAN
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_long	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_d_dn:
+	mov.l		FP_SRC_EX(%a6),%d0	# fetch SNAN sign
+	andi.l		&0x80000000,%d0		# keep sign
+	ori.l		&0x7fc00000,%d0		# insert new exponent,SNAN bit
+	mov.l		%d1,-(%sp)
+	mov.l		FP_SRC_HI(%a6),%d1	# load mantissa
+	lsr.l		&0x8,%d1		# shift mantissa for sgl
+	or.l		%d1,%d0			# create sgl SNAN
+	mov.l		(%sp)+,%d1
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_l		# store result to regfile
+	bra.w		fsnan_exit
+
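+# dbl is the same idea with a repack: keep the sign, force the exponent
+# and quiet bit (0x7ff8xxxx), and shift the upper 52 mantissa bits right
+# by 11 across the two result longwords before writing all 8 bytes out.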
+fsnan_out_d:
+	mov.l		FP_SRC_EX(%a6),%d0	# fetch SNAN sign
+	andi.l		&0x80000000,%d0		# keep sign
+	ori.l		&0x7ff80000,%d0		# insert new exponent,SNAN bit
+	mov.l		FP_SRC_HI(%a6),%d1	# load hi mantissa
+	mov.l		%d0,FP_SCR0_EX(%a6)	# store to temp space
+	mov.l		&11,%d0			# load shift amt
+	lsr.l		%d0,%d1
+	or.l		%d1,FP_SCR0_EX(%a6)	# create dbl hi
+	mov.l		FP_SRC_HI(%a6),%d1	# load hi mantissa
+	andi.l		&0x000007ff,%d1
+	ror.l		%d0,%d1
+	mov.l		%d1,FP_SCR0_HI(%a6)	# store to temp space
+	mov.l		FP_SRC_LO(%a6),%d1	# load lo mantissa
+	lsr.l		%d0,%d1
+	or.l		%d1,FP_SCR0_HI(%a6)	# create dbl lo
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	movq.l		&0x8,%d0		# pass: size of 8 bytes
+	bsr.l		_dmem_write		# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	bra.w		fsnan_exit
+
+# for extended precision, if the addressing mode is pre-decrement or
+# post-increment, then the address register did not get updated.
+# in addition, for pre-decrement, the stacked <ea> is incorrect.
+fsnan_out_x:
+	clr.b		SPCOND_FLG(%a6)		# clear special case flag
+
+	mov.w		FP_SRC_EX(%a6),FP_SCR0_EX(%a6)
+	clr.w		2+FP_SCR0(%a6)
+	mov.l		FP_SRC_HI(%a6),%d0
+	bset		&30,%d0
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		FP_SRC_LO(%a6),FP_SCR0_LO(%a6)
+
+	btst		&0x5,EXC_SR(%a6)	# supervisor mode exception?
+	bne.b		fsnan_out_x_s		# yes
+
+	mov.l		%usp,%a0		# fetch user stack pointer
+	mov.l		%a0,EXC_A7(%a6)		# save on stack for calc_ea()
+	mov.l		(%a6),EXC_A6(%a6)
+
+	bsr.l		_calc_ea_fout		# find the correct ea,update An
+	mov.l		%a0,%a1
+	mov.l		%a0,EXC_EA(%a6)		# stack correct <ea>
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp		# restore user stack pointer
+	mov.l		EXC_A6(%a6),(%a6)
+
+fsnan_out_x_save:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	movq.l		&0xc,%d0		# pass: size of extended
+	bsr.l		_dmem_write		# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_x		# yes
+
+	bra.w		fsnan_exit
+
+fsnan_out_x_s:
+	mov.l		(%a6),EXC_A6(%a6)
+
+	bsr.l		_calc_ea_fout		# find the correct ea,update An
+	mov.l		%a0,%a1
+	mov.l		%a0,EXC_EA(%a6)		# stack correct <ea>
+
+	mov.l		EXC_A6(%a6),(%a6)
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
+	bne.b		fsnan_out_x_save	# no
+
+# the operation was "fmove.x SNAN,-(a7)" from supervisor mode.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	mov.l		EXC_A6(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_PC+0x2(%sp),LOCAL_SIZE+EXC_PC+0x2-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+	mov.l		LOCAL_SIZE+FP_SCR0_EX(%sp),LOCAL_SIZE+EXC_SR(%sp)
+	mov.l		LOCAL_SIZE+FP_SCR0_HI(%sp),LOCAL_SIZE+EXC_PC+0x2(%sp)
+	mov.l		LOCAL_SIZE+FP_SCR0_LO(%sp),LOCAL_SIZE+EXC_EA(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	bra.l		_real_snan
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_inex(): 060FPSP entry point for FP Inexact exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Inexact exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	smovcr() - emulate an "fmovcr" instruction			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_real_inex() - "callout" to operating system inexact handler	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Inexact exception frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the FP Inexact exception is enabled, the goal	#
+# is to get to the handler specified at _real_inex(). But, on the 060,	#
+# for opclass zero and two instructions taking this exception, the	#
+# hardware doesn't store the correct result to the destination FP	#
+# register as did the '040 and '881/2. This handler must emulate the	#
+# instruction in order to get this value and then store it to the	#
+# correct register before calling _real_inex().				#
+#	For opclass 3 instructions, the 060 doesn't store the default	#
+# inexact result out to memory or data register file as it should.	#
+# This code must emulate the move out by calling fout() before finally	#
+# exiting through _real_inex().						#
+#									#
+#########################################################################
+
+	global		_fpsp_inex
+_fpsp_inex:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&13,%d0			# is instr an fmove out?
+	bne.w		finex_out		# fmove out
+
+
+# the hardware, for "fabs" and "fneg" w/ a long source format, puts the
+# longword integer directly into the upper longword of the mantissa along
+# w/ an exponent value of 0x401e. we convert this to extended precision here.
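+# e.g., a long source of +100 shows up as exp = 0x401e, hi(man) =
+# 0x00000064: 0x64 * 2^(0x401e-0x3fff-31) = 100. the fmov.l/fmov.x
+# pair below renormalizes that to exp = 0x4005, hi(man) = 0xc8000000.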
+	bfextu		%d0{&19:&3},%d0		# fetch instr size
+	bne.b		finex_cont		# instr size is not long
+	cmpi.w		FP_SRC_EX(%a6),&0x401e	# is exponent 0x401e?
+	bne.b		finex_cont		# no
+	fmov.l		&0x0,%fpcr
+	fmov.l		FP_SRC_HI(%a6),%fp0	# load integer src
+	fmov.x		%fp0,FP_SRC(%a6)	# store integer as extended precision
+	mov.w		&0xe001,0x2+FP_SRC(%a6)
+
+finex_cont:
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+# Here, we zero the ccode and exception byte field since we're going to
+# emulate the whole instruction. Notice, though, that we don't kill the
+# INEX1 bit. This is because a packed op has long since been converted
+# to extended before arriving here. Therefore, we need to retain the
+# INEX1 bit from when the operand was first converted.
+	andi.l		&0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	bfextu		EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg
+	cmpi.b		%d1,&0x17		# is op an fmovecr?
+	beq.w		finex_fmovcr		# yes
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# maybe NORM,DENORM
+
+# bits four and five of the fp extension word separate the monadic and dyadic
+# operations that can pass through fpsp_inex(). remember that fcmp and ftst
+# will never take this exception, but fsincos will.
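+# e.g., fsin (extension 0x0e) has bit five clear -> monadic; fadd (0x22)
+# has bit five set -> dyadic; fsincos (0x30-0x37) has bits four and five
+# set and is treated as monadic here even though it returns two results.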
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		finex_extract		# monadic
+
+	btst		&0x4,1+EXC_CMDREG(%a6)	# is operation an fsincos?
+	bne.b		finex_extract		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		finex_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+finex_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+finex_extract:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
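+# (tbl_unsupp holds longword offsets relative to its own base, so the
+# mov.l fetches an offset and the jsr adds it back to the table address.)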
+
+# the operation has been emulated. the result is in fp0.
+finex_save:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		store_fpreg
+
+finex_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_inex
+
+finex_fmovcr:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec,mode
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.l		&0x0000007f,%d1		# pass rom offset
+	bsr.l		smovcr
+	bra.b		finex_save
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# inexact exceptions. we do this here before passing control to
+# the user inexact handler.
+#
+# byte, word, and long destination format operations can pass
+# through here. so can double and single precision.
+# although packed opclass three operations can take inexact
+# exceptions, they won't pass through here since they are caught
+# first by the unsupported data format exception handler. that handler
+# sends them directly to _real_inex() if necessary.
+#
+finex_out:
+
+	mov.b		&NORM,STAG(%a6)		# src is a NORM
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec,mode
+
+	andi.l		&0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	bsr.l		fout			# store the default result
+
+	bra.b		finex_exit
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_dz(): 060FPSP entry point for FP DZ exception.		#
+#									#
+#	This handler should be the first code executed upon taking	#
+#	the FP DZ exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword from memory	#
+#	fix_skewed_ops() - adjust fsave operand				#
+#	_real_dz() - "callout" exit point from FP DZ handler		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP DZ exception stack.		#
+#	- The fsave frame contains the source operand.			#
+#									#
+# OUTPUT **************************************************************	#
+#	- The system stack contains the FP DZ exception stack.		#
+#	- The fsave frame contains the adjusted source operand.		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the DZ exception is enabled, the goal is to	#
+# get to the handler specified at _real_dz(). But, on the 060, when the	#
+# exception is taken, the input operand in the fsave state frame may	#
+# be incorrect for some cases and need to be adjusted. So, this package	#
+# adjusts the operand using fix_skewed_ops() and then branches to	#
+# _real_dz().								#
+#									#
+#########################################################################
+
+	global		_fpsp_dz
+_fpsp_dz:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source zero
+# in the sgl or dbl format.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+fdz_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_dz
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_fline(): 060FPSP entry point for "Line F emulator" exc.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	"Line F Emulator" exception in an operating system.		#
+#									#
+# XREF ****************************************************************	#
+#	_fpsp_unimp() - handle "FP Unimplemented" exceptions		#
+#	_real_fpu_disabled() - handle "FPU disabled" exceptions		#
+#	_real_fline() - handle "FLINE" exceptions			#
+#	_imem_read_long() - read instruction longword			#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains a "Line F Emulator" exception	#
+#	  stack frame.							#
+#									#
+# OUTPUT **************************************************************	#
+#	- The system stack is unchanged					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	When a "Line F Emulator" exception occurs, there are 3 possible	#
+# exception types, denoted by the exception stack frame format number:	#
+#	(1) FPU unimplemented instruction (6 word stack frame)		#
+#	(2) FPU disabled (8 word stack frame)				#
+#	(3) Line F (4 word stack frame)					#
+#									#
+#	This module determines which and forks the flow off to the	#
+# appropriate "callout" (for "disabled" and "Line F") or to the		#
+# correct emulation code (for "FPU unimplemented").			#
+#	This code also must check for "fmovecr" instructions w/ a	#
+# non-zero <ea> field. These may get flagged as "Line F" but should	#
+# really be flagged as "FPU Unimplemented". (This is a "feature" on	#
+# the '060.)								#
+#									#
+#########################################################################
+
+	global		_fpsp_fline
+_fpsp_fline:
+
+# check to see if this exception is a "FP Unimplemented Instruction"
+# exception. if so, branch directly to that handler's entry point.
+	cmpi.w		0x6(%sp),&0x202c
+	beq.l		_fpsp_unimp
+
+# check to see if the FPU is disabled. if so, jump to the OS entry
+# point for that condition.
+	cmpi.w		0x6(%sp),&0x402c
+	beq.l		_real_fpu_disabled
+
+# the exception was an "F-Line Illegal" exception. we check to see
+# if the F-Line instruction is an "fmovecr" w/ a non-zero <ea>. if
+# so, convert the F-Line exception stack frame to an FP Unimplemented
+# Instruction exception stack frame else branch to the OS entry
+# point for the F-Line exception handler.
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+
+	mov.l		EXC_PC(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch instruction words
+
+	bfextu		%d0{&0:&10},%d1		# is it an fmovecr?
+	cmpi.w		%d1,&0x03c8
+	bne.b		fline_fline		# no
+
+	bfextu		%d0{&16:&6},%d1		# is it an fmovecr?
+	cmpi.b		%d1,&0x17
+	bne.b		fline_fline		# no
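+# (fmovecr is opword 0xf200 -- upper ten bits 0x03c8 -- plus an extension
+# word starting with %010111. one with a zero <ea> field takes the fp
+# unimplemented exception instead, so a match here means the <ea> field
+# was non-zero.)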
+
+# it's an fmovecr w/ a non-zero <ea> that has entered through
+# the F-Line Illegal exception.
+# so, we need to convert the F-Line exception stack frame into an
+# FP Unimplemented Instruction stack frame and jump to that entry
+# point.
+#
+# but, if the FPU is disabled, then we need to jump to the FPU disabled
+# entry point.
+	movc		%pcr,%d0
+	btst		&0x1,%d0
+	beq.b		fline_fmovcr
+
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	sub.l		&0x8,%sp		# make room for "Next PC", <ea>
+	mov.w		0x8(%sp),(%sp)
+	mov.l		0xa(%sp),0x2(%sp)	# move "Current PC"
+	mov.w		&0x402c,0x6(%sp)
+	mov.l		0x2(%sp),0xc(%sp)
+	addq.l		&0x4,0x2(%sp)		# set "Next PC"
+
+	bra.l		_real_fpu_disabled
+
+fline_fmovcr:
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	fmov.l		0x2(%sp),%fpiar		# set current PC
+	addq.l		&0x4,0x2(%sp)		# set Next PC
+
+	mov.l		(%sp),-(%sp)
+	mov.l		0x8(%sp),0x4(%sp)
+	mov.b		&0x20,0x6(%sp)
+
+	bra.l		_fpsp_unimp
+
+fline_fline:
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	bra.l		_real_fline
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_unimp(): 060FPSP entry point for FP "Unimplemented	#
+#		       Instruction" exception.				#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Unimplemented Instruction exception in an operating system.	#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_{word,long}() - read instruction word/longword	#
+#	load_fop() - load src/dst ops from memory and/or FP regfile	#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	tbl_trans - addr of table of emulation routines for trnscndls	#
+#	_real_access() - "callout" for access error exception		#
+#	_fpsp_done() - "callout" for exit; work all done		#
+#	_real_trace() - "callout" for Trace enabled exception		#
+#	smovcr() - emulate "fmovecr" instruction			#
+#	funimp_skew() - adjust fsave src ops to "incorrect" value	#
+#	_ftrapcc() - emulate an "ftrapcc" instruction			#
+#	_fdbcc() - emulate an "fdbcc" instruction			#
+#	_fscc() - emulate an "fscc" instruction				#
+#	_real_trap() - "callout" for Trap exception			#
+#	_real_bsun() - "callout" for enabled Bsun exception		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the "Unimplemented Instr" stk frame	#
+#									#
+# OUTPUT **************************************************************	#
+#	If access error:						#
+#	- The system stack is changed to an access error stack frame	#
+#	If Trace exception enabled:					#
+#	- The system stack is changed to a Trace exception stack frame	#
+#	Else: (normal case)						#
+#	- Correct result has been stored as appropriate			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	There are two main cases of instructions that may enter here to	#
+# be emulated: (1) the FPgen instructions, most of which were also	#
+# unimplemented on the 040, and (2) "ftrapcc", "fscc", and "fdbcc".	#
+#	For the first set, this handler calls the routine load_fop()	#
+# to load the source and destination (for dyadic) operands to be used	#
+# for instruction emulation. The correct emulation routine is then	#
+# chosen by decoding the instruction type and indexing into an		#
+# emulation subroutine index table. After emulation returns, this	#
+# handler checks to see if an exception should occur as a result of the #
+# FP instruction emulation. If so, then an FP exception of the correct	#
+# type is inserted into the FPU state frame using the "frestore"	#
+# instruction before exiting through _fpsp_done(). In either the	#
+# exceptional or non-exceptional cases, we must check to see if the	#
+# Trace exception is enabled. If so, then we must create a Trace	#
+# exception frame from the current exception frame and exit through	#
+# _real_trace().							#
+#	For "fdbcc", "ftrapcc", and "fscc", the emulation subroutines	#
+# _fdbcc(), _ftrapcc(), and _fscc() respectively are used. All three	#
+# may flag that a BSUN exception should be taken. If so, then the	#
+# current exception stack frame is converted into a BSUN exception	#
+# stack frame and an exit is made through _real_bsun(). If the		#
+# instruction was "ftrapcc" and a Trap exception should result, a Trap	#
+# exception stack frame is created from the current frame and an exit	#
+# is made through _real_trap(). If a Trace exception is pending, then	#
+# a Trace exception frame is created from the current frame and a jump	#
+# is made to _real_trace(). Finally, if none of these conditions exist,	#
+# then the handler exits though the callout _fpsp_done().		#
+#									#
+#	In any of the above scenarios, if a _mem_read() or _mem_write()	#
+# "callout" returns a failing value, then an access error stack frame	#
+# is created from the current stack frame and an exit is made through	#
+# _real_access().							#
+#									#
+#########################################################################
+
+#
+# FP UNIMPLEMENTED INSTRUCTION STACK FRAME:
+#
+#	*****************
+#	*		* => <ea> of fp unimp instr.
+#	-      EA	-
+#	*		*
+#	*****************
+#	* 0x2 *  0x02c	* => frame format and vector offset(vector #11)
+#	*****************
+#	*		*
+#	-    Next PC	- => PC of instr to execute after exc handling
+#	*		*
+#	*****************
+#	*      SR	* => SR at the time the exception was taken
+#	*****************
+#
+# Note: the !NULL bit does not get set in the fsave frame when the
+# machine encounters an fp unimp exception. Therefore, it must be set
+# before leaving this handler.
+#
+	global		_fpsp_unimp
+_fpsp_unimp:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1
+
+	btst		&0x5,EXC_SR(%a6)	# user mode exception?
+	bne.b		funimp_s		# no; supervisor mode
+
+# save the value of the user stack pointer onto the stack frame
+funimp_u:
+	mov.l		%usp,%a0		# fetch user stack pointer
+	mov.l		%a0,EXC_A7(%a6)		# store in stack frame
+	bra.b		funimp_cont
+
+# store the value of the supervisor stack pointer BEFORE the exc occurred.
+# old_sp is address just above stacked effective address.
+funimp_s:
+	lea		4+EXC_EA(%a6),%a0	# load old a7'
+	mov.l		%a0,EXC_A7(%a6)		# store a7'
+	mov.l		%a0,OLD_A7(%a6)		# make a copy
+
+funimp_cont:
+
+# the FPIAR holds the "current PC" of the faulting instruction.
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+############################################################################
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	clr.b		SPCOND_FLG(%a6)		# clear "special case" flag
+
+# Divide the fp instructions into 8 types based on the TYPE field in
+# bits 6-8 of the opword (classes 6,7 are undefined).
+# (for the '060, only two types can take this exception)
+#	bftst		%d0{&7:&3}		# test TYPE
+	btst		&22,%d0			# type 0 or 1 ?
+	bne.w		funimp_misc		# type 1
+
+#########################################
+# TYPE == 0: General instructions	#
+#########################################
+funimp_gen:
+
+	clr.b		STORE_FLG(%a6)		# clear "store result" flag
+
+# clear the ccode byte and exception status byte
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	bfextu		%d0{&16:&6},%d1		# extract upper 6 of cmdreg
+	cmpi.b		%d1,&0x17		# is op an fmovecr?
+	beq.w		funimp_fmovcr		# yes
+
+funimp_gen_op:
+	bsr.l		_load_fop		# load
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x003f,%d1		# extract extension bits
+	lsl.w		&0x3,%d1		# shift left 3 bits
+	or.b		STAG(%a6),%d1		# insert src optag bits
+
+	lea		FP_DST(%a6),%a1		# pass dst ptr in a1
+	lea		FP_SRC(%a6),%a0		# pass src ptr in a0
+
+	mov.w		(tbl_trans.w,%pc,%d1.w*2),%d1
+	jsr		(tbl_trans.w,%pc,%d1.w*1) # emulate
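+# (the index built above is (extension << 3) | STAG: each extension gets
+# eight tbl_trans entries, one per source operand class tag, and the
+# fetched word offset is added back to the table base by the jsr.)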
+
+funimp_fsave:
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		funimp_ena		# some are enabled
+
+funimp_store:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # fetch Dn
+	bsr.l		store_fpreg		# store result to fp regfile
+
+funimp_gen_exit:
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+funimp_gen_exit_cmp:
+	cmpi.b		SPCOND_FLG(%a6),&mia7_flg # was the ea mode (sp)+ ?
+	beq.b		funimp_gen_exit_a7	# yes
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # was the ea mode -(sp) ?
+	beq.b		funimp_gen_exit_a7	# yes
+
+funimp_gen_exit_cont:
+	unlk		%a6
+
+funimp_gen_exit_cont2:
+	btst		&0x7,(%sp)		# is trace on?
+	beq.l		_fpsp_done		# no
+
+# this catches a problem with the case where an exception will be re-inserted
+# into the machine. the frestore has already been executed...so, the fmov.l
+# alone of the control register would trigger an unwanted exception.
+# until I feel like fixing this, we'll sidestep the exception.
+	fsave		-(%sp)
+	fmov.l		%fpiar,0x14(%sp)	# "Current PC" is in FPIAR
+	frestore	(%sp)+
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x24
+	bra.l		_real_trace
+
+funimp_gen_exit_a7:
+	btst		&0x5,EXC_SR(%a6)	# supervisor or user mode?
+	bne.b		funimp_gen_exit_a7_s	# supervisor
+
+	mov.l		%a0,-(%sp)
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	mov.l		(%sp)+,%a0
+	bra.b		funimp_gen_exit_cont
+
+# if the instruction was executed from supervisor mode and the addressing
+# mode was (a7)+, then the stack frame for the rte must be shifted "up"
+# "n" bytes where "n" is the size of the src operand type.
+# f<op>.{b,w,l,s,d,x,p}
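+# the shift count is parked in the stale SR slot of the old frame so
+# that, after the unlk, a single "add.w (%sp),%sp" can pop it and leave
+# sp pointing at the shifted frame.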
+funimp_gen_exit_a7_s:
+	mov.l		%d0,-(%sp)		# save d0
+	mov.l		EXC_A7(%a6),%d0		# load new a7'
+	sub.l		OLD_A7(%a6),%d0		# subtract old a7'
+	mov.l		0x2+EXC_PC(%a6),(0x2+EXC_PC,%a6,%d0) # shift stack frame
+	mov.l		EXC_SR(%a6),(EXC_SR,%a6,%d0) # shift stack frame
+	mov.w		%d0,EXC_SR(%a6)		# store incr number
+	mov.l		(%sp)+,%d0		# restore d0
+
+	unlk		%a6
+
+	add.w		(%sp),%sp		# stack frame shifted
+	bra.b		funimp_gen_exit_cont2
+
+######################
+# fmovecr.x #ccc,fpn #
+######################
+funimp_fmovcr:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.l		&0x0000007f,%d1		# pass rom offset in d1
+	bsr.l		smovcr
+	bra.w		funimp_fsave
+
+#########################################################################
+
+#
+# the user has enabled some exceptions. we figure not to see this too
+# often so that's why it gets lower priority.
+#
+funimp_ena:
+
+# was an exception set that was also enabled?
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled and set
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		funimp_exc		# at least one was set
+
+# no exception that was enabled was set BUT if we got an exact overflow
+# and overflow wasn't enabled but inexact was (yech!) then this is
+# an inexact exception; otherwise, return to normal non-exception flow.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+	beq.w		funimp_store		# no; return to normal flow
+
+# the overflow w/ exact result happened but was inexact set in the FPCR?
+funimp_ovfl:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
+	beq.w		funimp_store		# no; return to normal flow
+	bra.b		funimp_exc_ovfl		# yes
+
+# some exception happened that was actually enabled.
+# we'll insert this new exception into the FPU and then return.
+funimp_exc:
+	subi.l		&24,%d0			# fix offset to be 0-7
+	cmpi.b		%d0,&0x6		# is exception INEX?
+	bne.b		funimp_exc_force	# no
+
+# the enabled exception was inexact. so, if it occurs with an overflow
+# or underflow that was disabled, then we have to force an overflow or
+# underflow frame. the eventual overflow or underflow handler will see that
+# it's actually an inexact and act appropriately. this is the only easy
+# way to have the EXOP available for the enabled inexact handler when
+# a disabled overflow or underflow has also happened.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+	bne.b		funimp_exc_ovfl		# yes
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
+	bne.b		funimp_exc_unfl		# yes
+
+# force the fsave exception status bits to signal an exception of the
+# appropriate type. don't forget to "skew" the source operand in case we
+# "unskewed" the one the hardware initially gave us.
+funimp_exc_force:
+	mov.l		%d0,-(%sp)		# save d0
+	bsr.l		funimp_skew		# check for special case
+	mov.l		(%sp)+,%d0		# restore d0
+	mov.w		(tbl_funimp_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+	bra.b		funimp_gen_exit2	# exit with frestore
+
+tbl_funimp_except:
+	short		0xe002, 0xe006, 0xe004, 0xe005
+	short		0xe003, 0xe002, 0xe001, 0xe001
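+# (same fsave status word encoding as tbl_iea_except earlier; slot 0 is
+# again unreachable since a bsun is handled through funimp_bsun instead.)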
+
+# insert an overflow frame
+funimp_exc_ovfl:
+	bsr.l		funimp_skew		# check for special case
+	mov.w		&0xe005,2+FP_SRC(%a6)
+	bra.b		funimp_gen_exit2
+
+# insert an underflow frame
+funimp_exc_unfl:
+	bsr.l		funimp_skew		# check for special case
+	mov.w		&0xe003,2+FP_SRC(%a6)
+
+# this is the general exit point for an enabled exception that will be
+# restored into the machine for the instruction just emulated.
+funimp_gen_exit2:
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# insert exceptional status
+
+	bra.w		funimp_gen_exit_cmp
+
+############################################################################
+
+#
+# TYPE == 1: FDB<cc>, FS<cc>, FTRAP<cc>
+#
+# These instructions were implemented on the '881/2 and '040 in hardware but
+# are emulated in software on the '060.
+#
+funimp_misc:
+	bfextu		%d0{&10:&3},%d1		# extract mode field
+	cmpi.b		%d1,&0x1		# is it an fdb<cc>?
+	beq.w		funimp_fdbcc		# yes
+	cmpi.b		%d1,&0x7		# is the mode field 0x7?
+	bne.w		funimp_fscc		# no; it's an fs<cc>
+	bfextu		%d0{&13:&3},%d1		# extract reg field
+	cmpi.b		%d1,&0x2		# is the reg field < 0x2?
+	blt.w		funimp_fscc		# yes; it's an fs<cc>
+
+#########################
+# ftrap<cc>		#
+# ftrap<cc>.w #<data>	#
+# ftrap<cc>.l #<data>	#
+#########################
+funimp_ftrapcc:
+
+	bsr.l		_ftrapcc		# FTRAP<cc>()
+
+	cmpi.b		SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
+	beq.w		funimp_bsun		# yes
+
+	cmpi.b		SPCOND_FLG(%a6),&ftrapcc_flg # should a trap occur?
+	bne.w		funimp_done		# no
+
+#	 FP UNIMP FRAME		   TRAP  FRAME
+#	*****************	*****************
+#	**    <EA>     **	**  Current PC **
+#	*****************	*****************
+#	* 0x2 *  0x02c	*	* 0x2 *  0x01c  *
+#	*****************	*****************
+#	**   Next PC   **	**   Next PC   **
+#	*****************	*****************
+#	*      SR	*	*      SR	*
+#	*****************	*****************
+#	    (6 words)		    (6 words)
+#
+# the ftrapcc instruction should take a trap. so, here we must create a
+# trap stack frame from an unimplemented fp instruction stack frame and
+# jump to the user supplied entry point for the trap exception
+funimp_ftrapcc_tp:
+	mov.l		USER_FPIAR(%a6),EXC_EA(%a6) # Address = Current PC
+	mov.w		&0x201c,EXC_VOFF(%a6)	# Vector Offset = 0x01c
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+	bra.l		_real_trap
+
+#########################
+# fdb<cc> Dn,<label>	#
+#########################
+funimp_fdbcc:
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# read displacement
+
+	tst.l		%d1			# did ifetch fail?
+	bne.w		funimp_iacc		# yes
+
+	ext.l		%d0			# sign extend displacement
+
+	bsr.l		_fdbcc			# FDB<cc>()
+
+	cmpi.b		SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
+	beq.w		funimp_bsun
+
+	bra.w		funimp_done		# branch to finish
+
+#################
+# fs<cc>.b <ea>	#
+#################
+funimp_fscc:
+
+	bsr.l		_fscc			# FS<cc>()
+
+# I am assuming here that an "fs<cc>.b -(An)" or "fs<cc>.b (An)+" instruction
+# does not need to update "An" before taking a bsun exception.
+	cmpi.b		SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
+	beq.w		funimp_bsun
+
+	btst		&0x5,EXC_SR(%a6)	# is it a user mode exception?
+	bne.b		funimp_fscc_s		# no
+
+funimp_fscc_u:
+	mov.l		EXC_A7(%a6),%a0		# yes; set new USP
+	mov.l		%a0,%usp
+	bra.w		funimp_done		# branch to finish
+
+# remember, I'm assuming that post-increment is bogus...(it IS!!!)
+# so, the least significant WORD of the stacked effective address got
+# overwritten by the "fs<cc> -(An)". We must shift the stack frame "down"
+# so that the rte will work correctly without destroying the result.
+# even though the operation size is byte, the stack ptr is decr by 2.
+#
+# remember, also, this instruction may be traced.
+funimp_fscc_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # was a7 modified?
+	bne.w		funimp_done		# no
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	btst		&0x7,(%sp)		# is trace enabled?
+	bne.b		funimp_fscc_s_trace	# yes
+
+	subq.l		&0x2,%sp
+	mov.l		0x2(%sp),(%sp)		# shift SR,hi(PC) "down"
+	mov.l		0x6(%sp),0x4(%sp)	# shift lo(PC),voff "down"
+	bra.l		_fpsp_done
+
+funimp_fscc_s_trace:
+	subq.l		&0x2,%sp
+	mov.l		0x2(%sp),(%sp)		# shift SR,hi(PC) "down"
+	mov.w		0x6(%sp),0x4(%sp)	# shift lo(PC)
+	mov.w		&0x2024,0x6(%sp)	# fmt/voff = $2024
+	fmov.l		%fpiar,0x8(%sp)		# insert "current PC"
+
+	bra.l		_real_trace
+
+#
+# The ftrap<cc>, fs<cc>, or fdb<cc> is to take an enabled bsun. we must convert
+# the fp unimplemented instruction exception stack frame into a bsun stack frame,
+# restore a bsun exception into the machine, and branch to the user
+# supplied bsun hook.
+#
+#	 FP UNIMP FRAME		   BSUN FRAME
+#	*****************	*****************
+#	**    <EA>     **	* 0x0 * 0x0c0	*
+#	*****************	*****************
+#	* 0x2 *  0x02c  *	** Current PC  **
+#	*****************	*****************
+#	**   Next PC   **	*      SR	*
+#	*****************	*****************
+#	*      SR	*	    (4 words)
+#	*****************
+#	    (6 words)
+#
+funimp_bsun:
+	mov.w		&0x00c0,2+EXC_EA(%a6)	# Fmt = 0x0; Vector Offset = 0x0c0
+	mov.l		USER_FPIAR(%a6),EXC_VOFF(%a6) # PC = Current PC
+	mov.w		EXC_SR(%a6),2+EXC_PC(%a6) # shift SR "up"
+
+	mov.w		&0xe000,2+FP_SRC(%a6)	# bsun exception enabled
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore bsun exception
+
+	unlk		%a6
+
+	addq.l		&0x4,%sp		# erase sludge
+
+	bra.l		_real_bsun		# branch to user bsun hook
+
+#
+# all ftrapcc/fscc/fdbcc processing has been completed. unwind the stack frame
+# and return.
+#
+# as usual, we have to check for trace mode being on here. since instructions
+# modifying the supervisor stack frame don't pass through here, this is a
+# relatively easy task.
+#
+funimp_done:
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	btst		&0x7,(%sp)		# is trace enabled?
+	bne.b		funimp_trace		# yes
+
+	bra.l		_fpsp_done
+
+#	 FP UNIMP FRAME		  TRACE  FRAME
+#	*****************	*****************
+#	**    <EA>     **	**  Current PC **
+#	*****************	*****************
+#	* 0x2 *  0x02c	*	* 0x2 *  0x024  *
+#	*****************	*****************
+#	**   Next PC   **	**   Next PC   **
+#	*****************	*****************
+#	*      SR	*	*      SR	*
+#	*****************	*****************
+#	    (6 words)		    (6 words)
+#
+# the fscc instruction should take a trace trap. so, here we must create a
+# trace stack frame from an unimplemented fp instruction stack frame and
+# jump to the user supplied entry point for the trace exception
+funimp_trace:
+	fmov.l		%fpiar,0x8(%sp)		# current PC is in fpiar
+	mov.b		&0x24,0x7(%sp)		# vector offset = 0x024
+
+	bra.l		_real_trace
+
+################################################################
+
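+# the table below is indexed by (extension word opcode * 8) + source
+# operand tag; each entry is a 16-bit offset from tbl_trans to the
+# handler, so a zero entry ("tbl_trans - tbl_trans") marks a case that
+# is not dispatched through this table. in C terms, roughly:
+#
+#	handler = (char *)tbl_trans + tbl_trans[(opcode << 3) + stag];
+#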
+	global		tbl_trans
+	swbeg		&0x1c0
+tbl_trans:
+	short		tbl_trans - tbl_trans	# $00-0 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-1 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-2 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-3 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-4 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-5 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-6 fmovecr all
+	short		tbl_trans - tbl_trans	# $00-7 fmovecr all
+
+	short		tbl_trans - tbl_trans	# $01-0 fint norm
+	short		tbl_trans - tbl_trans	# $01-1 fint zero
+	short		tbl_trans - tbl_trans	# $01-2 fint inf
+	short		tbl_trans - tbl_trans	# $01-3 fint qnan
+	short		tbl_trans - tbl_trans	# $01-5 fint denorm
+	short		tbl_trans - tbl_trans	# $01-4 fint snan
+	short		tbl_trans - tbl_trans	# $01-6 fint unnorm
+	short		tbl_trans - tbl_trans	# $01-7 ERROR
+
+	short		ssinh	 - tbl_trans	# $02-0 fsinh norm
+	short		src_zero - tbl_trans	# $02-1 fsinh zero
+	short		src_inf	 - tbl_trans	# $02-2 fsinh inf
+	short		src_qnan - tbl_trans	# $02-3 fsinh qnan
+	short		ssinhd	 - tbl_trans	# $02-5 fsinh denorm
+	short		src_snan - tbl_trans	# $02-4 fsinh snan
+	short		tbl_trans - tbl_trans	# $02-6 fsinh unnorm
+	short		tbl_trans - tbl_trans	# $02-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $03-0 fintrz norm
+	short		tbl_trans - tbl_trans	# $03-1 fintrz zero
+	short		tbl_trans - tbl_trans	# $03-2 fintrz inf
+	short		tbl_trans - tbl_trans	# $03-3 fintrz qnan
+	short		tbl_trans - tbl_trans	# $03-5 fintrz denorm
+	short		tbl_trans - tbl_trans	# $03-4 fintrz snan
+	short		tbl_trans - tbl_trans	# $03-6 fintrz unnorm
+	short		tbl_trans - tbl_trans	# $03-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $04-0 fsqrt norm
+	short		tbl_trans - tbl_trans	# $04-1 fsqrt zero
+	short		tbl_trans - tbl_trans	# $04-2 fsqrt inf
+	short		tbl_trans - tbl_trans	# $04-3 fsqrt qnan
+	short		tbl_trans - tbl_trans	# $04-5 fsqrt denorm
+	short		tbl_trans - tbl_trans	# $04-4 fsqrt snan
+	short		tbl_trans - tbl_trans	# $04-6 fsqrt unnorm
+	short		tbl_trans - tbl_trans	# $04-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $05-0 ERROR
+	short		tbl_trans - tbl_trans	# $05-1 ERROR
+	short		tbl_trans - tbl_trans	# $05-2 ERROR
+	short		tbl_trans - tbl_trans	# $05-3 ERROR
+	short		tbl_trans - tbl_trans	# $05-4 ERROR
+	short		tbl_trans - tbl_trans	# $05-5 ERROR
+	short		tbl_trans - tbl_trans	# $05-6 ERROR
+	short		tbl_trans - tbl_trans	# $05-7 ERROR
+
+	short		slognp1	 - tbl_trans	# $06-0 flognp1 norm
+	short		src_zero - tbl_trans	# $06-1 flognp1 zero
+	short		sopr_inf - tbl_trans	# $06-2 flognp1 inf
+	short		src_qnan - tbl_trans	# $06-3 flognp1 qnan
+	short		slognp1d - tbl_trans	# $06-5 flognp1 denorm
+	short		src_snan - tbl_trans	# $06-4 flognp1 snan
+	short		tbl_trans - tbl_trans	# $06-6 flognp1 unnorm
+	short		tbl_trans - tbl_trans	# $06-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $07-0 ERROR
+	short		tbl_trans - tbl_trans	# $07-1 ERROR
+	short		tbl_trans - tbl_trans	# $07-2 ERROR
+	short		tbl_trans - tbl_trans	# $07-3 ERROR
+	short		tbl_trans - tbl_trans	# $07-4 ERROR
+	short		tbl_trans - tbl_trans	# $07-5 ERROR
+	short		tbl_trans - tbl_trans	# $07-6 ERROR
+	short		tbl_trans - tbl_trans	# $07-7 ERROR
+
+	short		setoxm1	 - tbl_trans	# $08-0 fetoxm1 norm
+	short		src_zero - tbl_trans	# $08-1 fetoxm1 zero
+	short		setoxm1i - tbl_trans	# $08-2 fetoxm1 inf
+	short		src_qnan - tbl_trans	# $08-3 fetoxm1 qnan
+	short		setoxm1d - tbl_trans	# $08-5 fetoxm1 denorm
+	short		src_snan - tbl_trans	# $08-4 fetoxm1 snan
+	short		tbl_trans - tbl_trans	# $08-6 fetoxm1 unnorm
+	short		tbl_trans - tbl_trans	# $08-7 ERROR
+
+	short		stanh	 - tbl_trans	# $09-0 ftanh norm
+	short		src_zero - tbl_trans	# $09-1 ftanh zero
+	short		src_one	 - tbl_trans	# $09-2 ftanh inf
+	short		src_qnan - tbl_trans	# $09-3 ftanh qnan
+	short		stanhd	 - tbl_trans	# $09-5 ftanh denorm
+	short		src_snan - tbl_trans	# $09-4 ftanh snan
+	short		tbl_trans - tbl_trans	# $09-6 ftanh unnorm
+	short		tbl_trans - tbl_trans	# $09-7 ERROR
+
+	short		satan	 - tbl_trans	# $0a-0 fatan norm
+	short		src_zero - tbl_trans	# $0a-1 fatan zero
+	short		spi_2	 - tbl_trans	# $0a-2 fatan inf
+	short		src_qnan - tbl_trans	# $0a-3 fatan qnan
+	short		satand	 - tbl_trans	# $0a-5 fatan denorm
+	short		src_snan - tbl_trans	# $0a-4 fatan snan
+	short		tbl_trans - tbl_trans	# $0a-6 fatan unnorm
+	short		tbl_trans - tbl_trans	# $0a-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $0b-0 ERROR
+	short		tbl_trans - tbl_trans	# $0b-1 ERROR
+	short		tbl_trans - tbl_trans	# $0b-2 ERROR
+	short		tbl_trans - tbl_trans	# $0b-3 ERROR
+	short		tbl_trans - tbl_trans	# $0b-4 ERROR
+	short		tbl_trans - tbl_trans	# $0b-5 ERROR
+	short		tbl_trans - tbl_trans	# $0b-6 ERROR
+	short		tbl_trans - tbl_trans	# $0b-7 ERROR
+
+	short		sasin	 - tbl_trans	# $0c-0 fasin norm
+	short		src_zero - tbl_trans	# $0c-1 fasin zero
+	short		t_operr	 - tbl_trans	# $0c-2 fasin inf
+	short		src_qnan - tbl_trans	# $0c-3 fasin qnan
+	short		sasind	 - tbl_trans	# $0c-5 fasin denorm
+	short		src_snan - tbl_trans	# $0c-4 fasin snan
+	short		tbl_trans - tbl_trans	# $0c-6 fasin unnorm
+	short		tbl_trans - tbl_trans	# $0c-7 ERROR
+
+	short		satanh	 - tbl_trans	# $0d-0 fatanh norm
+	short		src_zero - tbl_trans	# $0d-1 fatanh zero
+	short		t_operr	 - tbl_trans	# $0d-2 fatanh inf
+	short		src_qnan - tbl_trans	# $0d-3 fatanh qnan
+	short		satanhd	 - tbl_trans	# $0d-5 fatanh denorm
+	short		src_snan - tbl_trans	# $0d-4 fatanh snan
+	short		tbl_trans - tbl_trans	# $0d-6 fatanh unnorm
+	short		tbl_trans - tbl_trans	# $0d-7 ERROR
+
+	short		ssin	 - tbl_trans	# $0e-0 fsin norm
+	short		src_zero - tbl_trans	# $0e-1 fsin zero
+	short		t_operr	 - tbl_trans	# $0e-2 fsin inf
+	short		src_qnan - tbl_trans	# $0e-3 fsin qnan
+	short		ssind	 - tbl_trans	# $0e-5 fsin denorm
+	short		src_snan - tbl_trans	# $0e-4 fsin snan
+	short		tbl_trans - tbl_trans	# $0e-6 fsin unnorm
+	short		tbl_trans - tbl_trans	# $0e-7 ERROR
+
+	short		stan	 - tbl_trans	# $0f-0 ftan norm
+	short		src_zero - tbl_trans	# $0f-1 ftan zero
+	short		t_operr	 - tbl_trans	# $0f-2 ftan inf
+	short		src_qnan - tbl_trans	# $0f-3 ftan qnan
+	short		stand	 - tbl_trans	# $0f-5 ftan denorm
+	short		src_snan - tbl_trans	# $0f-4 ftan snan
+	short		tbl_trans - tbl_trans	# $0f-6 ftan unnorm
+	short		tbl_trans - tbl_trans	# $0f-7 ERROR
+
+	short		setox	 - tbl_trans	# $10-0 fetox norm
+	short		ld_pone	 - tbl_trans	# $10-1 fetox zero
+	short		szr_inf	 - tbl_trans	# $10-2 fetox inf
+	short		src_qnan - tbl_trans	# $10-3 fetox qnan
+	short		setoxd	 - tbl_trans	# $10-5 fetox denorm
+	short		src_snan - tbl_trans	# $10-4 fetox snan
+	short		tbl_trans - tbl_trans	# $10-6 fetox unnorm
+	short		tbl_trans - tbl_trans	# $10-7 ERROR
+
+	short		stwotox	 - tbl_trans	# $11-0 ftwotox norm
+	short		ld_pone	 - tbl_trans	# $11-1 ftwotox zero
+	short		szr_inf	 - tbl_trans	# $11-2 ftwotox inf
+	short		src_qnan - tbl_trans	# $11-3 ftwotox qnan
+	short		stwotoxd - tbl_trans	# $11-5 ftwotox denorm
+	short		src_snan - tbl_trans	# $11-4 ftwotox snan
+	short		tbl_trans - tbl_trans	# $11-6 ftwotox unnorm
+	short		tbl_trans - tbl_trans	# $11-7 ERROR
+
+	short		stentox	 - tbl_trans	# $12-0 ftentox norm
+	short		ld_pone	 - tbl_trans	# $12-1 ftentox zero
+	short		szr_inf	 - tbl_trans	# $12-2 ftentox inf
+	short		src_qnan - tbl_trans	# $12-3 ftentox qnan
+	short		stentoxd - tbl_trans	# $12-5 ftentox denorm
+	short		src_snan - tbl_trans	# $12-4 ftentox snan
+	short		tbl_trans - tbl_trans	# $12-6 ftentox unnorm
+	short		tbl_trans - tbl_trans	# $12-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $13-0 ERROR
+	short		tbl_trans - tbl_trans	# $13-1 ERROR
+	short		tbl_trans - tbl_trans	# $13-2 ERROR
+	short		tbl_trans - tbl_trans	# $13-3 ERROR
+	short		tbl_trans - tbl_trans	# $13-4 ERROR
+	short		tbl_trans - tbl_trans	# $13-5 ERROR
+	short		tbl_trans - tbl_trans	# $13-6 ERROR
+	short		tbl_trans - tbl_trans	# $13-7 ERROR
+
+	short		slogn	 - tbl_trans	# $14-0 flogn norm
+	short		t_dz2	 - tbl_trans	# $14-1 flogn zero
+	short		sopr_inf - tbl_trans	# $14-2 flogn inf
+	short		src_qnan - tbl_trans	# $14-3 flogn qnan
+	short		slognd	 - tbl_trans	# $14-5 flogn denorm
+	short		src_snan - tbl_trans	# $14-4 flogn snan
+	short		tbl_trans - tbl_trans	# $14-6 flogn unnorm
+	short		tbl_trans - tbl_trans	# $14-7 ERROR
+
+	short		slog10	 - tbl_trans	# $15-0 flog10 norm
+	short		t_dz2	 - tbl_trans	# $15-1 flog10 zero
+	short		sopr_inf - tbl_trans	# $15-2 flog10 inf
+	short		src_qnan - tbl_trans	# $15-3 flog10 qnan
+	short		slog10d	 - tbl_trans	# $15-5 flog10 denorm
+	short		src_snan - tbl_trans	# $15-4 flog10 snan
+	short		tbl_trans - tbl_trans	# $15-6 flog10 unnorm
+	short		tbl_trans - tbl_trans	# $15-7 ERROR
+
+	short		slog2	 - tbl_trans	# $16-0 flog2 norm
+	short		t_dz2	 - tbl_trans	# $16-1 flog2 zero
+	short		sopr_inf - tbl_trans	# $16-2 flog2 inf
+	short		src_qnan - tbl_trans	# $16-3 flog2 qnan
+	short		slog2d	 - tbl_trans	# $16-5 flog2 denorm
+	short		src_snan - tbl_trans	# $16-4 flog2 snan
+	short		tbl_trans - tbl_trans	# $16-6 flog2 unnorm
+	short		tbl_trans - tbl_trans	# $16-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $17-0 ERROR
+	short		tbl_trans - tbl_trans	# $17-1 ERROR
+	short		tbl_trans - tbl_trans	# $17-2 ERROR
+	short		tbl_trans - tbl_trans	# $17-3 ERROR
+	short		tbl_trans - tbl_trans	# $17-4 ERROR
+	short		tbl_trans - tbl_trans	# $17-5 ERROR
+	short		tbl_trans - tbl_trans	# $17-6 ERROR
+	short		tbl_trans - tbl_trans	# $17-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $18-0 fabs norm
+	short		tbl_trans - tbl_trans	# $18-1 fabs zero
+	short		tbl_trans - tbl_trans	# $18-2 fabs inf
+	short		tbl_trans - tbl_trans	# $18-3 fabs qnan
+	short		tbl_trans - tbl_trans	# $18-5 fabs denorm
+	short		tbl_trans - tbl_trans	# $18-4 fabs snan
+	short		tbl_trans - tbl_trans	# $18-6 fabs unnorm
+	short		tbl_trans - tbl_trans	# $18-7 ERROR
+
+	short		scosh	 - tbl_trans	# $19-0 fcosh norm
+	short		ld_pone	 - tbl_trans	# $19-1 fcosh zero
+	short		ld_pinf	 - tbl_trans	# $19-2 fcosh inf
+	short		src_qnan - tbl_trans	# $19-3 fcosh qnan
+	short		scoshd	 - tbl_trans	# $19-5 fcosh denorm
+	short		src_snan - tbl_trans	# $19-4 fcosh snan
+	short		tbl_trans - tbl_trans	# $19-6 fcosh unnorm
+	short		tbl_trans - tbl_trans	# $19-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $1a-0 fneg norm
+	short		tbl_trans - tbl_trans	# $1a-1 fneg zero
+	short		tbl_trans - tbl_trans	# $1a-2 fneg inf
+	short		tbl_trans - tbl_trans	# $1a-3 fneg qnan
+	short		tbl_trans - tbl_trans	# $1a-5 fneg denorm
+	short		tbl_trans - tbl_trans	# $1a-4 fneg snan
+	short		tbl_trans - tbl_trans	# $1a-6 fneg unnorm
+	short		tbl_trans - tbl_trans	# $1a-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $1b-0 ERROR
+	short		tbl_trans - tbl_trans	# $1b-1 ERROR
+	short		tbl_trans - tbl_trans	# $1b-2 ERROR
+	short		tbl_trans - tbl_trans	# $1b-3 ERROR
+	short		tbl_trans - tbl_trans	# $1b-4 ERROR
+	short		tbl_trans - tbl_trans	# $1b-5 ERROR
+	short		tbl_trans - tbl_trans	# $1b-6 ERROR
+	short		tbl_trans - tbl_trans	# $1b-7 ERROR
+
+	short		sacos	 - tbl_trans	# $1c-0 facos norm
+	short		ld_ppi2	 - tbl_trans	# $1c-1 facos zero
+	short		t_operr	 - tbl_trans	# $1c-2 facos inf
+	short		src_qnan - tbl_trans	# $1c-3 facos qnan
+	short		sacosd	 - tbl_trans	# $1c-5 facos denorm
+	short		src_snan - tbl_trans	# $1c-4 facos snan
+	short		tbl_trans - tbl_trans	# $1c-6 facos unnorm
+	short		tbl_trans - tbl_trans	# $1c-7 ERROR
+
+	short		scos	 - tbl_trans	# $1d-0 fcos norm
+	short		ld_pone	 - tbl_trans	# $1d-1 fcos zero
+	short		t_operr	 - tbl_trans	# $1d-2 fcos inf
+	short		src_qnan - tbl_trans	# $1d-3 fcos qnan
+	short		scosd	 - tbl_trans	# $1d-5 fcos denorm
+	short		src_snan - tbl_trans	# $1d-4 fcos snan
+	short		tbl_trans - tbl_trans	# $1d-6 fcos unnorm
+	short		tbl_trans - tbl_trans	# $1d-7 ERROR
+
+	short		sgetexp	 - tbl_trans	# $1e-0 fgetexp norm
+	short		src_zero - tbl_trans	# $1e-1 fgetexp zero
+	short		t_operr	 - tbl_trans	# $1e-2 fgetexp inf
+	short		src_qnan - tbl_trans	# $1e-3 fgetexp qnan
+	short		sgetexpd - tbl_trans	# $1e-5 fgetexp denorm
+	short		src_snan - tbl_trans	# $1e-4 fgetexp snan
+	short		tbl_trans - tbl_trans	# $1e-6 fgetexp unnorm
+	short		tbl_trans - tbl_trans	# $1e-7 ERROR
+
+	short		sgetman	 - tbl_trans	# $1f-0 fgetman norm
+	short		src_zero - tbl_trans	# $1f-1 fgetman zero
+	short		t_operr	 - tbl_trans	# $1f-2 fgetman inf
+	short		src_qnan - tbl_trans	# $1f-3 fgetman qnan
+	short		sgetmand - tbl_trans	# $1f-5 fgetman denorm
+	short		src_snan - tbl_trans	# $1f-4 fgetman snan
+	short		tbl_trans - tbl_trans	# $1f-6 fgetman unnorm
+	short		tbl_trans - tbl_trans	# $1f-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $20-0 fdiv norm
+	short		tbl_trans - tbl_trans	# $20-1 fdiv zero
+	short		tbl_trans - tbl_trans	# $20-2 fdiv inf
+	short		tbl_trans - tbl_trans	# $20-3 fdiv qnan
+	short		tbl_trans - tbl_trans	# $20-5 fdiv denorm
+	short		tbl_trans - tbl_trans	# $20-4 fdiv snan
+	short		tbl_trans - tbl_trans	# $20-6 fdiv unnorm
+	short		tbl_trans - tbl_trans	# $20-7 ERROR
+
+	short		smod_snorm - tbl_trans	# $21-0 fmod norm
+	short		smod_szero - tbl_trans	# $21-1 fmod zero
+	short		smod_sinf - tbl_trans	# $21-2 fmod inf
+	short		sop_sqnan - tbl_trans	# $21-3 fmod qnan
+	short		smod_sdnrm - tbl_trans	# $21-5 fmod denorm
+	short		sop_ssnan - tbl_trans	# $21-4 fmod snan
+	short		tbl_trans - tbl_trans	# $21-6 fmod unnorm
+	short		tbl_trans - tbl_trans	# $21-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $22-0 fadd norm
+	short		tbl_trans - tbl_trans	# $22-1 fadd zero
+	short		tbl_trans - tbl_trans	# $22-2 fadd inf
+	short		tbl_trans - tbl_trans	# $22-3 fadd qnan
+	short		tbl_trans - tbl_trans	# $22-5 fadd denorm
+	short		tbl_trans - tbl_trans	# $22-4 fadd snan
+	short		tbl_trans - tbl_trans	# $22-6 fadd unnorm
+	short		tbl_trans - tbl_trans	# $22-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $23-0 fmul norm
+	short		tbl_trans - tbl_trans	# $23-1 fmul zero
+	short		tbl_trans - tbl_trans	# $23-2 fmul inf
+	short		tbl_trans - tbl_trans	# $23-3 fmul qnan
+	short		tbl_trans - tbl_trans	# $23-5 fmul denorm
+	short		tbl_trans - tbl_trans	# $23-4 fmul snan
+	short		tbl_trans - tbl_trans	# $23-6 fmul unnorm
+	short		tbl_trans - tbl_trans	# $23-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $24-0 fsgldiv norm
+	short		tbl_trans - tbl_trans	# $24-1 fsgldiv zero
+	short		tbl_trans - tbl_trans	# $24-2 fsgldiv inf
+	short		tbl_trans - tbl_trans	# $24-3 fsgldiv qnan
+	short		tbl_trans - tbl_trans	# $24-5 fsgldiv denorm
+	short		tbl_trans - tbl_trans	# $24-4 fsgldiv snan
+	short		tbl_trans - tbl_trans	# $24-6 fsgldiv unnorm
+	short		tbl_trans - tbl_trans	# $24-7 ERROR
+
+	short		srem_snorm - tbl_trans	# $25-0 frem norm
+	short		srem_szero - tbl_trans	# $25-1 frem zero
+	short		srem_sinf - tbl_trans	# $25-2 frem inf
+	short		sop_sqnan - tbl_trans	# $25-3 frem qnan
+	short		srem_sdnrm - tbl_trans	# $25-5 frem denorm
+	short		sop_ssnan - tbl_trans	# $25-4 frem snan
+	short		tbl_trans - tbl_trans	# $25-6 frem unnorm
+	short		tbl_trans - tbl_trans	# $25-7 ERROR
+
+	short		sscale_snorm - tbl_trans # $26-0 fscale norm
+	short		sscale_szero - tbl_trans # $26-1 fscale zero
+	short		sscale_sinf - tbl_trans	# $26-2 fscale inf
+	short		sop_sqnan - tbl_trans	# $26-3 fscale qnan
+	short		sscale_sdnrm - tbl_trans # $26-5 fscale denorm
+	short		sop_ssnan - tbl_trans	# $26-4 fscale snan
+	short		tbl_trans - tbl_trans	# $26-6 fscale unnorm
+	short		tbl_trans - tbl_trans	# $26-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $27-0 fsglmul norm
+	short		tbl_trans - tbl_trans	# $27-1 fsglmul zero
+	short		tbl_trans - tbl_trans	# $27-2 fsglmul inf
+	short		tbl_trans - tbl_trans	# $27-3 fsglmul qnan
+	short		tbl_trans - tbl_trans	# $27-5 fsglmul denorm
+	short		tbl_trans - tbl_trans	# $27-4 fsglmul snan
+	short		tbl_trans - tbl_trans	# $27-6 fsglmul unnorm
+	short		tbl_trans - tbl_trans	# $27-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $28-0 fsub norm
+	short		tbl_trans - tbl_trans	# $28-1 fsub zero
+	short		tbl_trans - tbl_trans	# $28-2 fsub inf
+	short		tbl_trans - tbl_trans	# $28-3 fsub qnan
+	short		tbl_trans - tbl_trans	# $28-5 fsub denorm
+	short		tbl_trans - tbl_trans	# $28-4 fsub snan
+	short		tbl_trans - tbl_trans	# $28-6 fsub unnorm
+	short		tbl_trans - tbl_trans	# $28-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $29-0 ERROR
+	short		tbl_trans - tbl_trans	# $29-1 ERROR
+	short		tbl_trans - tbl_trans	# $29-2 ERROR
+	short		tbl_trans - tbl_trans	# $29-3 ERROR
+	short		tbl_trans - tbl_trans	# $29-4 ERROR
+	short		tbl_trans - tbl_trans	# $29-5 ERROR
+	short		tbl_trans - tbl_trans	# $29-6 ERROR
+	short		tbl_trans - tbl_trans	# $29-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $2a-0 ERROR
+	short		tbl_trans - tbl_trans	# $2a-1 ERROR
+	short		tbl_trans - tbl_trans	# $2a-2 ERROR
+	short		tbl_trans - tbl_trans	# $2a-3 ERROR
+	short		tbl_trans - tbl_trans	# $2a-4 ERROR
+	short		tbl_trans - tbl_trans	# $2a-5 ERROR
+	short		tbl_trans - tbl_trans	# $2a-6 ERROR
+	short		tbl_trans - tbl_trans	# $2a-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $2b-0 ERROR
+	short		tbl_trans - tbl_trans	# $2b-1 ERROR
+	short		tbl_trans - tbl_trans	# $2b-2 ERROR
+	short		tbl_trans - tbl_trans	# $2b-3 ERROR
+	short		tbl_trans - tbl_trans	# $2b-4 ERROR
+	short		tbl_trans - tbl_trans	# $2b-5 ERROR
+	short		tbl_trans - tbl_trans	# $2b-6 ERROR
+	short		tbl_trans - tbl_trans	# $2b-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $2c-0 ERROR
+	short		tbl_trans - tbl_trans	# $2c-1 ERROR
+	short		tbl_trans - tbl_trans	# $2c-2 ERROR
+	short		tbl_trans - tbl_trans	# $2c-3 ERROR
+	short		tbl_trans - tbl_trans	# $2c-4 ERROR
+	short		tbl_trans - tbl_trans	# $2c-5 ERROR
+	short		tbl_trans - tbl_trans	# $2c-6 ERROR
+	short		tbl_trans - tbl_trans	# $2c-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $2d-0 ERROR
+	short		tbl_trans - tbl_trans	# $2d-1 ERROR
+	short		tbl_trans - tbl_trans	# $2d-2 ERROR
+	short		tbl_trans - tbl_trans	# $2d-3 ERROR
+	short		tbl_trans - tbl_trans	# $2d-4 ERROR
+	short		tbl_trans - tbl_trans	# $2d-5 ERROR
+	short		tbl_trans - tbl_trans	# $2d-6 ERROR
+	short		tbl_trans - tbl_trans	# $2d-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $2e-0 ERROR
+	short		tbl_trans - tbl_trans	# $2e-1 ERROR
+	short		tbl_trans - tbl_trans	# $2e-2 ERROR
+	short		tbl_trans - tbl_trans	# $2e-3 ERROR
+	short		tbl_trans - tbl_trans	# $2e-4 ERROR
+	short		tbl_trans - tbl_trans	# $2e-5 ERROR
+	short		tbl_trans - tbl_trans	# $2e-6 ERROR
+	short		tbl_trans - tbl_trans	# $2e-7 ERROR
+
+	short		tbl_trans - tbl_trans	# $2f-0 ERROR
+	short		tbl_trans - tbl_trans	# $2f-1 ERROR
+	short		tbl_trans - tbl_trans	# $2f-2 ERROR
+	short		tbl_trans - tbl_trans	# $2f-3 ERROR
+	short		tbl_trans - tbl_trans	# $2f-4 ERROR
+	short		tbl_trans - tbl_trans	# $2f-5 ERROR
+	short		tbl_trans - tbl_trans	# $2f-6 ERROR
+	short		tbl_trans - tbl_trans	# $2f-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $30-0 fsincos norm
+	short		ssincosz - tbl_trans	# $30-1 fsincos zero
+	short		ssincosi - tbl_trans	# $30-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $30-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $30-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $30-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $30-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $30-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $31-0 fsincos norm
+	short		ssincosz - tbl_trans	# $31-1 fsincos zero
+	short		ssincosi - tbl_trans	# $31-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $31-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $31-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $31-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $31-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $31-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $32-0 fsincos norm
+	short		ssincosz - tbl_trans	# $32-1 fsincos zero
+	short		ssincosi - tbl_trans	# $32-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $32-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $32-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $32-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $32-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $32-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $33-0 fsincos norm
+	short		ssincosz - tbl_trans	# $33-1 fsincos zero
+	short		ssincosi - tbl_trans	# $33-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $33-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $33-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $33-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $33-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $33-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $34-0 fsincos norm
+	short		ssincosz - tbl_trans	# $34-1 fsincos zero
+	short		ssincosi - tbl_trans	# $34-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $34-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $34-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $34-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $34-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $34-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $35-0 fsincos norm
+	short		ssincosz - tbl_trans	# $35-1 fsincos zero
+	short		ssincosi - tbl_trans	# $35-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $35-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $35-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $35-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $35-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $35-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $36-0 fsincos norm
+	short		ssincosz - tbl_trans	# $36-1 fsincos zero
+	short		ssincosi - tbl_trans	# $36-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $36-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $36-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $36-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $36-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $36-7 ERROR
+
+	short		ssincos	 - tbl_trans	# $37-0 fsincos norm
+	short		ssincosz - tbl_trans	# $37-1 fsincos zero
+	short		ssincosi - tbl_trans	# $37-2 fsincos inf
+	short		ssincosqnan - tbl_trans	# $37-3 fsincos qnan
+	short		ssincosd - tbl_trans	# $37-5 fsincos denorm
+	short		ssincossnan - tbl_trans	# $37-4 fsincos snan
+	short		tbl_trans - tbl_trans	# $37-6 fsincos unnorm
+	short		tbl_trans - tbl_trans	# $37-7 ERROR
+
+##########
+
+# the instruction fetch access for the displacement word for the
+# fdbcc emulation failed. here, we create an access error frame
+# from the current frame and branch to _real_access().
+funimp_iacc:
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+
+	mov.l		USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
+
+	unlk		%a6
+
+	mov.l		(%sp),-(%sp)		# store SR,hi(PC)
+	mov.w		0x8(%sp),0x4(%sp)	# store lo(PC)
+	mov.w		&0x4008,0x6(%sp)	# store voff
+	mov.l		0x2(%sp),0x8(%sp)	# store EA
+	mov.l		&0x09428001,0xc(%sp)	# store FSLW
+
+	btst		&0x5,(%sp)		# user or supervisor mode?
+	beq.b		funimp_iacc_end		# user
+	bset		&0x2,0xd(%sp)		# set supervisor TM bit
+
+funimp_iacc_end:
+	bra.l		_real_access
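+# (the words stored above assemble a format $4 access error frame:
+#  voff 0x4008 = format 0x4, vector offset 0x008 (access error),
+#  followed by the faulting address and the precomputed fault status
+#  long word (FSLW); the TM field is then adjusted for supervisor mode.)
+#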
+
+#########################################################################
+# ssin():     computes the sine of a normalized input			#
+# ssind():    computes the sine of a denormalized input			#
+# scos():     computes the cosine of a normalized input			#
+# scosd():    computes the cosine of a denormalized input		#
+# ssincos():  computes the sine and cosine of a normalized input	#
+# ssincosd(): computes the sine and cosine of a denormalized input	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = sin(X) or cos(X)						#
+#									#
+#    For ssincos(X):							#
+#	fp0 = sin(X)							#
+#	fp1 = cos(X)							#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 1 ulp in 64 significant bits, i.e.	#
+#	within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	SIN and COS:							#
+#	1. If SIN is invoked, set AdjN := 0; otherwise, set AdjN := 1.	#
+#									#
+#	2. If |X| >= 15Pi or |X| < 2**(-40), go to 7.			#
+#									#
+#	3. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let	#
+#		k = N mod 4, so in particular, k = 0,1,2,or 3.		#
+#		Overwrite k by k := k + AdjN.				#
+#									#
+#	4. If k is even, go to 6.					#
+#									#
+#	5. (k is odd) Set j := (k-1)/2, sgn := (-1)**j.			#
+#		Return sgn*cos(r) where cos(r) is approximated by an	#
+#		even polynomial in r, 1 + r*r*(B1+s*(B2+ ... + s*B8)),	#
+#		s = r*r.						#
+#		Exit.							#
+#									#
+#	6. (k is even) Set j := k/2, sgn := (-1)**j. Return sgn*sin(r)	#
+#		where sin(r) is approximated by an odd polynomial in r	#
+#		r + r*s*(A1+s*(A2+ ... + s*A7)),	s = r*r.	#
+#		Exit.							#
+#									#
+#	7. If |X| > 1, go to 9.						#
+#									#
+#	8. (|X|<2**(-40)) If SIN is invoked, return X;			#
+#		otherwise return 1.					#
+#									#
+#	9. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi,		#
+#		go back to 3.						#
+#									#
+#	SINCOS:								#
+#	1. If |X| >= 15Pi or |X| < 2**(-40), go to 6.			#
+#									#
+#	2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let	#
+#		k = N mod 4, so in particular, k = 0,1,2,or 3.		#
+#									#
+#	3. If k is even, go to 5.					#
+#									#
+#	4. (k is odd) Set j1 := (k-1)/2, j2 := j1 (EOR) (k mod 2), i.e.	#
+#		j1 exclusive or with the l.s.b. of k.			#
+#		sgn1 := (-1)**j1, sgn2 := (-1)**j2.			#
+#		SIN(X) = sgn1 * cos(r) and COS(X) = sgn2*sin(r) where	#
+#		sin(r) and cos(r) are computed as odd and even		#
+#		polynomials in r, respectively. Exit			#
+#									#
+#	5. (k is even) Set j1 := k/2, sgn1 := (-1)**j1.			#
+#		SIN(X) = sgn1 * sin(r) and COS(X) = sgn1*cos(r) where	#
+#		sin(r) and cos(r) are computed as odd and even		#
+#		polynomials in r, respectively. Exit			#
+#									#
+#	6. If |X| > 1, go to 8.						#
+#									#
+#	7. (|X|<2**(-40)) SIN(X) = X and COS(X) = 1. Exit.		#
+#									#
+#	8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi,		#
+#		go back to 2.						#
+#									#
+#########################################################################
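+# As a reading aid, a minimal C sketch of the SIN/COS steps above; the
+# function name is illustrative, libm stands in for the polynomial and
+# two-piece table reduction, and round-to-nearest is assumed:
+#
+#	#include <math.h>
+#	double fpsp_sincos(double x, int adjn)	/* adjn: 0 = sin, 1 = cos */
+#	{
+#		if (fabs(x) < 0x1p-40)		/* step 8: tiny argument */
+#			return adjn ? 1.0 : x;
+#		double n = nearbyint(x * M_2_PI); /* step 3: X = N(Pi/2)+r */
+#		double r = x - n * M_PI_2;
+#		long k = ((long)n + adjn) & 3;	/* k = (N + AdjN) mod 4 */
+#		double p = (k & 1) ? cos(r) : sin(r); /* steps 5 and 6 */
+#		return (k & 2) ? -p : p;	/* sgn = (-1)**j */
+#	}
+#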
+
+SINA7:	long		0xBD6AAA77,0xCCC994F5
+SINA6:	long		0x3DE61209,0x7AAE8DA1
+SINA5:	long		0xBE5AE645,0x2A118AE4
+SINA4:	long		0x3EC71DE3,0xA5341531
+SINA3:	long		0xBF2A01A0,0x1A018B59,0x00000000,0x00000000
+SINA2:	long		0x3FF80000,0x88888888,0x888859AF,0x00000000
+SINA1:	long		0xBFFC0000,0xAAAAAAAA,0xAAAAAA99,0x00000000
+
+COSB8:	long		0x3D2AC4D0,0xD6011EE3
+COSB7:	long		0xBDA9396F,0x9F45AC19
+COSB6:	long		0x3E21EED9,0x0612C972
+COSB5:	long		0xBE927E4F,0xB79D9FCF
+COSB4:	long		0x3EFA01A0,0x1A01D423,0x00000000,0x00000000
+COSB3:	long		0xBFF50000,0xB60B60B6,0x0B61D438,0x00000000
+COSB2:	long		0x3FFA0000,0xAAAAAAAA,0xAAAAAB5E
+COSB1:	long		0xBF000000
+
+	set		INARG,FP_SCR0
+
+	set		X,FP_SCR0
+#	set		XDCARE,X+2
+	set		XFRAC,X+4
+
+	set		RPRIME,FP_SCR0
+	set		SPRIME,FP_SCR1
+
+	set		POSNEG1,L_SCR1
+	set		TWOTO63,L_SCR1
+
+	set		ENDFLAG,L_SCR2
+	set		INT,L_SCR2
+
+	set		ADJN,L_SCR3
+
+############################################
+	global		ssin
+ssin:
+	mov.l		&0,ADJN(%a6)		# yes; SET ADJN TO 0
+	bra.b		SINBGN
+
+############################################
+	global		scos
+scos:
+	mov.l		&1,ADJN(%a6)		# yes; SET ADJN TO 1
+
+############################################
+SINBGN:
+#--SAVE FPCR, FP1. CHECK IF |X| IS TOO SMALL OR LARGE
+
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	fmov.x		%fp0,X(%a6)		# save input at X
+
+# "COMPACTIFY" X
+	mov.l		(%a0),%d1		# put exp in hi word
+	mov.w		4(%a0),%d1		# fetch hi(man)
+	and.l		&0x7FFFFFFF,%d1		# strip sign
+
+	cmpi.l		%d1,&0x3FD78000		# is |X| < 2**(-40)?
+	bge.b		SOK1			# no
+	bra.w		SINSM			# yes; input is very small
+
+SOK1:
+	cmp.l		%d1,&0x4004BC7E		# is |X| >= 15 PI?
+	blt.b		SINMAIN			# no
+	bra.w		SREDUCEX		# yes; input is very large
+
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+SINMAIN:
+	fmov.x		%fp0,%fp1
+	fmul.d		TWOBYPI(%pc),%fp1	# X*2/PI
+
+	lea		PITBL+0x200(%pc),%a1	# TABLE OF N*PI/2, N = -32,...,32
+
+	fmov.l		%fp1,INT(%a6)		# CONVERT TO INTEGER
+
+	mov.l		INT(%a6),%d1		# make a copy of N
+	asl.l		&4,%d1			# N *= 16
+	add.l		%d1,%a1			# tbl_addr = a1 + (N*16)
+
+# A1 IS THE ADDRESS OF N*PIBY2
+# ...WHICH IS IN TWO PIECES Y1 & Y2
+	fsub.x		(%a1)+,%fp0		# X-Y1
+	fsub.s		(%a1),%fp0		# fp0 = R = (X-Y1)-Y2
+
+SINCONT:
+#--continuation from REDUCEX
+
+#--GET N+ADJN AND SEE IF SIN(R) OR COS(R) IS NEEDED
+	mov.l		INT(%a6),%d1
+	add.l		ADJN(%a6),%d1		# SEE IF D0 IS ODD OR EVEN
+	ror.l		&1,%d1			# D0 WAS ODD IFF D0 IS NEGATIVE
+	cmp.l		%d1,&0
+	blt.w		COSPOLY
+
+#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
+#--THEN WE RETURN	SGN*SIN(R). SGN*SIN(R) IS COMPUTED BY
+#--R' + R'*S*(A1 + S(A2 + S(A3 + S(A4 + ... + SA7)))), WHERE
+#--R' = SGN*R, S=R*R. THIS CAN BE REWRITTEN AS
+#--R' + R'*S*( [A1+T(A3+T(A5+TA7))] + [S(A2+T(A4+TA6))])
+#--WHERE T=S*S.
+#--NOTE THAT A3 THROUGH A7 ARE STORED IN DOUBLE PRECISION
+#--WHILE A1 AND A2 ARE IN DOUBLE-EXTENDED FORMAT.
+SINPOLY:
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmov.x		%fp0,X(%a6)		# X IS R
+	fmul.x		%fp0,%fp0		# FP0 IS S
+
+	fmov.d		SINA7(%pc),%fp3
+	fmov.d		SINA6(%pc),%fp2
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS T
+
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+# ...LEAST SIG. BIT OF D0 IN SIGN POSITION
+	eor.l		%d1,X(%a6)		# X IS NOW R'= SGN*R
+
+	fmul.x		%fp1,%fp3		# TA7
+	fmul.x		%fp1,%fp2		# TA6
+
+	fadd.d		SINA5(%pc),%fp3		# A5+TA7
+	fadd.d		SINA4(%pc),%fp2		# A4+TA6
+
+	fmul.x		%fp1,%fp3		# T(A5+TA7)
+	fmul.x		%fp1,%fp2		# T(A4+TA6)
+
+	fadd.d		SINA3(%pc),%fp3		# A3+T(A5+TA7)
+	fadd.x		SINA2(%pc),%fp2		# A2+T(A4+TA6)
+
+	fmul.x		%fp3,%fp1		# T(A3+T(A5+TA7))
+
+	fmul.x		%fp0,%fp2		# S(A2+T(A4+TA6))
+	fadd.x		SINA1(%pc),%fp1		# A1+T(A3+T(A5+TA7))
+	fmul.x		X(%a6),%fp0		# R'*S
+
+	fadd.x		%fp2,%fp1		# [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]
+
+	fmul.x		%fp1,%fp0		# SIN(R')-R'
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fadd.x		X(%a6),%fp0		# last inst - possible exception set
+	bra		t_inx2
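+# (the split evaluation above as a C sketch; the coefficient array is
+#  illustrative, standing in for SINA1..SINA7:
+#
+#	double sin_poly(double rp, double s, const double A[8])
+#	{					/* rp = R' = sgn*r, s = r*r */
+#		double t  = s * s;
+#		double hi = A[1] + t*(A[3] + t*(A[5] + t*A[7]));
+#		double lo = s *(A[2] + t*(A[4] + t*A[6]));
+#		return rp + rp*s*(hi + lo);
+#	}
+#
+#  splitting the odd and even chains lets fp2/fp3 proceed in parallel.)
+#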
+
+#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
+#--THEN WE RETURN	SGN*COS(R). SGN*COS(R) IS COMPUTED BY
+#--SGN + S'*(B1 + S(B2 + S(B3 + S(B4 + ... + SB8)))), WHERE
+#--S=R*R AND S'=SGN*S. THIS CAN BE REWRITTEN AS
+#--SGN + S'*([B1+T(B3+T(B5+TB7))] + [S(B2+T(B4+T(B6+TB8)))])
+#--WHERE T=S*S.
+#--NOTE THAT B4 THROUGH B8 ARE STORED IN DOUBLE PRECISION
+#--WHILE B2 AND B3 ARE IN DOUBLE-EXTENDED FORMAT, B1 IS -1/2
+#--AND IS THEREFORE STORED AS SINGLE PRECISION.
+COSPOLY:
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmul.x		%fp0,%fp0		# FP0 IS S
+
+	fmov.d		COSB8(%pc),%fp2
+	fmov.d		COSB7(%pc),%fp3
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS T
+
+	fmov.x		%fp0,X(%a6)		# X IS S
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+# ...LEAST SIG. BIT OF D0 IN SIGN POSITION
+
+	fmul.x		%fp1,%fp2		# TB8
+
+	eor.l		%d1,X(%a6)		# X IS NOW S'= SGN*S
+	and.l		&0x80000000,%d1
+
+	fmul.x		%fp1,%fp3		# TB7
+
+	or.l		&0x3F800000,%d1		# D0 IS SGN IN SINGLE
+	mov.l		%d1,POSNEG1(%a6)
+
+	fadd.d		COSB6(%pc),%fp2		# B6+TB8
+	fadd.d		COSB5(%pc),%fp3		# B5+TB7
+
+	fmul.x		%fp1,%fp2		# T(B6+TB8)
+	fmul.x		%fp1,%fp3		# T(B5+TB7)
+
+	fadd.d		COSB4(%pc),%fp2		# B4+T(B6+TB8)
+	fadd.x		COSB3(%pc),%fp3		# B3+T(B5+TB7)
+
+	fmul.x		%fp1,%fp2		# T(B4+T(B6+TB8))
+	fmul.x		%fp3,%fp1		# T(B3+T(B5+TB7))
+
+	fadd.x		COSB2(%pc),%fp2		# B2+T(B4+T(B6+TB8))
+	fadd.s		COSB1(%pc),%fp1		# B1+T(B3+T(B5+TB7))
+
+	fmul.x		%fp2,%fp0		# S(B2+T(B4+T(B6+TB8)))
+
+	fadd.x		%fp1,%fp0
+
+	fmul.x		X(%a6),%fp0
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fadd.s		POSNEG1(%a6),%fp0	# last inst - possible exception set
+	bra		t_inx2
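+# (the matching C sketch for the cosine chain, again with an
+#  illustrative array standing in for COSB1..COSB8:
+#
+#	double cos_poly(double sgn, double sp, double s, const double B[9])
+#	{					/* sp = S' = sgn*s, s = r*r */
+#		double t  = s * s;
+#		double hi = B[1] + t*(B[3] + t*(B[5] + t*B[7]));
+#		double lo = s*(B[2] + t*(B[4] + t*(B[6] + t*B[8])));
+#		return sgn + sp*(hi + lo);
+#	})
+#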
+
+##############################################
+
+# SINe: Big OR Small?
+#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+#--IF |X| < 2**(-40), RETURN X OR 1.
+SINBORS:
+	cmp.l		%d1,&0x3FFF8000
+	bgt.l		SREDUCEX
+
+SINSM:
+	mov.l		ADJN(%a6),%d1
+	cmp.l		%d1,&0
+	bgt.b		COSTINY
+
+# here, the operation may underflow iff the precision is sgl or dbl.
+# extended denorms are handled through another entry point.
+SINTINY:
+#	mov.w		&0x0000,XDCARE(%a6)	# JUST IN CASE
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0		# last inst - possible exception set
+	bra		t_catch
+
+COSTINY:
+	fmov.s		&0x3F800000,%fp0	# fp0 = 1.0
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fadd.s		&0x80800000,%fp0	# last inst - possible exception set
+	bra		t_pinx2
+
+################################################
+	global		ssind
+#--SIN(X) = X FOR DENORMALIZED X
+ssind:
+	bra		t_extdnrm
+
+############################################
+	global		scosd
+#--COS(X) = 1 FOR DENORMALIZED X
+scosd:
+	fmov.s		&0x3F800000,%fp0	# fp0 = 1.0
+	bra		t_pinx2
+
+##################################################
+
+	global		ssincos
+ssincos:
+#--SET ADJN TO 4
+	mov.l		&4,ADJN(%a6)
+
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	fmov.x		%fp0,X(%a6)
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1		# COMPACTIFY X
+
+	cmp.l		%d1,&0x3FD78000		# |X| >= 2**(-40)?
+	bge.b		SCOK1
+	bra.w		SCSM
+
+SCOK1:
+	cmp.l		%d1,&0x4004BC7E		# |X| < 15 PI?
+	blt.b		SCMAIN
+	bra.w		SREDUCEX
+
+
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+SCMAIN:
+	fmov.x		%fp0,%fp1
+
+	fmul.d		TWOBYPI(%pc),%fp1	# X*2/PI
+
+	lea		PITBL+0x200(%pc),%a1	# TABLE OF N*PI/2, N = -32,...,32
+
+	fmov.l		%fp1,INT(%a6)		# CONVERT TO INTEGER
+
+	mov.l		INT(%a6),%d1
+	asl.l		&4,%d1
+	add.l		%d1,%a1			# ADDRESS OF N*PIBY2, IN Y1, Y2
+
+	fsub.x		(%a1)+,%fp0		# X-Y1
+	fsub.s		(%a1),%fp0		# FP0 IS R = (X-Y1)-Y2
+
+SCCONT:
+#--continuation point from REDUCEX
+
+	mov.l		INT(%a6),%d1
+	ror.l		&1,%d1
+	cmp.l		%d1,&0			# D0 < 0 IFF N IS ODD
+	bge.w		NEVEN
+
+SNODD:
+#--REGISTERS SAVED SO FAR: D0, A0, FP2.
+	fmovm.x		&0x04,-(%sp)		# save fp2
+
+	fmov.x		%fp0,RPRIME(%a6)
+	fmul.x		%fp0,%fp0		# FP0 IS S = R*R
+	fmov.d		SINA7(%pc),%fp1		# A7
+	fmov.d		COSB8(%pc),%fp2		# B8
+	fmul.x		%fp0,%fp1		# SA7
+	fmul.x		%fp0,%fp2		# SB8
+
+	mov.l		%d2,-(%sp)
+	mov.l		%d1,%d2
+	ror.l		&1,%d2
+	and.l		&0x80000000,%d2
+	eor.l		%d1,%d2
+	and.l		&0x80000000,%d2
+
+	fadd.d		SINA6(%pc),%fp1		# A6+SA7
+	fadd.d		COSB7(%pc),%fp2		# B7+SB8
+
+	fmul.x		%fp0,%fp1		# S(A6+SA7)
+	eor.l		%d2,RPRIME(%a6)
+	mov.l		(%sp)+,%d2
+	fmul.x		%fp0,%fp2		# S(B7+SB8)
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+	mov.l		&0x3F800000,POSNEG1(%a6)
+	eor.l		%d1,POSNEG1(%a6)
+
+	fadd.d		SINA5(%pc),%fp1		# A5+S(A6+SA7)
+	fadd.d		COSB6(%pc),%fp2		# B6+S(B7+SB8)
+
+	fmul.x		%fp0,%fp1		# S(A5+S(A6+SA7))
+	fmul.x		%fp0,%fp2		# S(B6+S(B7+SB8))
+	fmov.x		%fp0,SPRIME(%a6)
+
+	fadd.d		SINA4(%pc),%fp1		# A4+S(A5+S(A6+SA7))
+	eor.l		%d1,SPRIME(%a6)
+	fadd.d		COSB5(%pc),%fp2		# B5+S(B6+S(B7+SB8))
+
+	fmul.x		%fp0,%fp1		# S(A4+...)
+	fmul.x		%fp0,%fp2		# S(B5+...)
+
+	fadd.d		SINA3(%pc),%fp1		# A3+S(A4+...)
+	fadd.d		COSB4(%pc),%fp2		# B4+S(B5+...)
+
+	fmul.x		%fp0,%fp1		# S(A3+...)
+	fmul.x		%fp0,%fp2		# S(B4+...)
+
+	fadd.x		SINA2(%pc),%fp1		# A2+S(A3+...)
+	fadd.x		COSB3(%pc),%fp2		# B3+S(B4+...)
+
+	fmul.x		%fp0,%fp1		# S(A2+...)
+	fmul.x		%fp0,%fp2		# S(B3+...)
+
+	fadd.x		SINA1(%pc),%fp1		# A1+S(A2+...)
+	fadd.x		COSB2(%pc),%fp2		# B2+S(B3+...)
+
+	fmul.x		%fp0,%fp1		# S(A1+...)
+	fmul.x		%fp2,%fp0		# S(B2+...)
+
+	fmul.x		RPRIME(%a6),%fp1	# R'S(A1+...)
+	fadd.s		COSB1(%pc),%fp0		# B1+S(B2...)
+	fmul.x		SPRIME(%a6),%fp0	# S'(B1+S(B2+...))
+
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+
+	fmov.l		%d0,%fpcr
+	fadd.x		RPRIME(%a6),%fp1	# COS(X)
+	bsr		sto_cos			# store cosine result
+	fadd.s		POSNEG1(%a6),%fp0	# SIN(X)
+	bra		t_inx2
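+# (sign selection above, in short: with k odd, j1 = (k-1)/2 and
+#  j2 = j1 ^ 1, so the ror/eor sequence builds sgn1 = (-1)**j1 and
+#  sgn2 = (-1)**j2 directly as IEEE sign bits, with no branches.)
+#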
+
+NEVEN:
+#--REGISTERS SAVED SO FAR: FP2.
+	fmovm.x		&0x04,-(%sp)		# save fp2
+
+	fmov.x		%fp0,RPRIME(%a6)
+	fmul.x		%fp0,%fp0		# FP0 IS S = R*R
+
+	fmov.d		COSB8(%pc),%fp1		# B8
+	fmov.d		SINA7(%pc),%fp2		# A7
+
+	fmul.x		%fp0,%fp1		# SB8
+	fmov.x		%fp0,SPRIME(%a6)
+	fmul.x		%fp0,%fp2		# SA7
+
+	ror.l		&1,%d1
+	and.l		&0x80000000,%d1
+
+	fadd.d		COSB7(%pc),%fp1		# B7+SB8
+	fadd.d		SINA6(%pc),%fp2		# A6+SA7
+
+	eor.l		%d1,RPRIME(%a6)
+	eor.l		%d1,SPRIME(%a6)
+
+	fmul.x		%fp0,%fp1		# S(B7+SB8)
+
+	or.l		&0x3F800000,%d1
+	mov.l		%d1,POSNEG1(%a6)
+
+	fmul.x		%fp0,%fp2		# S(A6+SA7)
+
+	fadd.d		COSB6(%pc),%fp1		# B6+S(B7+SB8)
+	fadd.d		SINA5(%pc),%fp2		# A5+S(A6+SA7)
+
+	fmul.x		%fp0,%fp1		# S(B6+S(B7+SB8))
+	fmul.x		%fp0,%fp2		# S(A5+S(A6+SA7))
+
+	fadd.d		COSB5(%pc),%fp1		# B5+S(B6+S(B7+SB8))
+	fadd.d		SINA4(%pc),%fp2		# A4+S(A5+S(A6+SA7))
+
+	fmul.x		%fp0,%fp1		# S(B5+...)
+	fmul.x		%fp0,%fp2		# S(A4+...)
+
+	fadd.d		COSB4(%pc),%fp1		# B4+S(B5+...)
+	fadd.d		SINA3(%pc),%fp2		# A3+S(A4+...)
+
+	fmul.x		%fp0,%fp1		# S(B4+...)
+	fmul.x		%fp0,%fp2		# S(A3+...)
+
+	fadd.x		COSB3(%pc),%fp1		# B3+S(B4+...)
+	fadd.x		SINA2(%pc),%fp2		# A2+S(A3+...)
+
+	fmul.x		%fp0,%fp1		# S(B3+...)
+	fmul.x		%fp0,%fp2		# S(A2+...)
+
+	fadd.x		COSB2(%pc),%fp1		# B2+S(B3+...)
+	fadd.x		SINA1(%pc),%fp2		# A1+S(A2+...)
+
+	fmul.x		%fp0,%fp1		# S(B2+...)
+	fmul.x		%fp2,%fp0		# s(a1+...)
+
+
+	fadd.s		COSB1(%pc),%fp1		# B1+S(B2...)
+	fmul.x		RPRIME(%a6),%fp0	# R'S(A1+...)
+	fmul.x		SPRIME(%a6),%fp1	# S'(B1+S(B2+...))
+
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+
+	fmov.l		%d0,%fpcr
+	fadd.s		POSNEG1(%a6),%fp1	# COS(X)
+	bsr		sto_cos			# store cosine result
+	fadd.x		RPRIME(%a6),%fp0	# SIN(X)
+	bra		t_inx2
+
+################################################
+
+SCBORS:
+	cmp.l		%d1,&0x3FFF8000
+	bgt.w		SREDUCEX
+
+################################################
+
+SCSM:
+#	mov.w		&0x0000,XDCARE(%a6)
+	fmov.s		&0x3F800000,%fp1
+
+	fmov.l		%d0,%fpcr
+	fsub.s		&0x00800000,%fp1
+	bsr		sto_cos			# store cosine result
+	fmov.l		%fpcr,%d0		# d0 must have fpcr,too
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0
+	bra		t_catch
+
+##############################################
+
+	global		ssincosd
+#--SIN AND COS OF X FOR DENORMALIZED X
+ssincosd:
+	mov.l		%d0,-(%sp)		# save d0
+	fmov.s		&0x3F800000,%fp1
+	bsr		sto_cos			# store cosine result
+	mov.l		(%sp)+,%d0		# restore d0
+	bra		t_extdnrm
+
+############################################
+
+#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
+SREDUCEX:
+	fmovm.x		&0x3c,-(%sp)		# save {fp2-fp5}
+	mov.l		%d2,-(%sp)		# save d2
+	fmov.s		&0x00000000,%fp1	# fp1 = 0
+
+#--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
+#--there is a danger of unwanted overflow in first LOOP iteration.  In this
+#--case, reduce argument by one remainder step to make subsequent reduction
+#--safe.
+	cmp.l		%d1,&0x7ffeffff		# is arg dangerously large?
+	bne.b		SLOOP			# no
+
+# yes; create 2**16383*PI/2
+	mov.w		&0x7ffe,FP_SCR0_EX(%a6)
+	mov.l		&0xc90fdaa2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+
+# create low half of 2**16383*PI/2 at FP_SCR1
+	mov.w		&0x7fdc,FP_SCR1_EX(%a6)
+	mov.l		&0x85a308d3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+
+	ftest.x		%fp0			# test sign of argument
+	fblt.w		sred_neg
+
+	or.b		&0x80,FP_SCR0_EX(%a6)	# positive arg
+	or.b		&0x80,FP_SCR1_EX(%a6)
+sred_neg:
+	fadd.x		FP_SCR0(%a6),%fp0	# high part of reduction is exact
+	fmov.x		%fp0,%fp1		# save high result in fp1
+	fadd.x		FP_SCR1(%a6),%fp0	# low part of reduction
+	fsub.x		%fp0,%fp1		# determine low component of result
+	fadd.x		FP_SCR1(%a6),%fp1	# fp0/fp1 are reduced argument.
+
+#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+#--integer quotient will be stored in N
+#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
+SLOOP:
+	fmov.x		%fp0,INARG(%a6)		# +-2**K * F, 1 <= F < 2
+	mov.w		INARG(%a6),%d1
+	mov.l		%d1,%a1			# save a copy of D0
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x00003FFF,%d1		# d0 = K
+	cmp.l		%d1,&28
+	ble.b		SLASTLOOP
+SCONTLOOP:
+	sub.l		&27,%d1			# d0 = L := K-27
+	mov.b		&0,ENDFLAG(%a6)
+	bra.b		SWORK
+SLASTLOOP:
+	clr.l		%d1			# d0 = L := 0
+	mov.b		&1,ENDFLAG(%a6)
+
+SWORK:
+#--FIND THE REMAINDER OF (R,r) W.R.T.	2**L * (PI/2). L IS SO CHOSEN
+#--THAT	INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+#--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+	mov.l		&0x00003FFE,%d2		# BIASED EXP OF 2/PI
+	sub.l		%d1,%d2			# BIASED EXP OF 2**(-L)*(2/PI)
+
+	mov.l		&0xA2F9836E,FP_SCR0_HI(%a6)
+	mov.l		&0x4E44152A,FP_SCR0_LO(%a6)
+	mov.w		%d2,FP_SCR0_EX(%a6)	# FP_SCR0 = 2**(-L)*(2/PI)
+
+	fmov.x		%fp0,%fp2
+	fmul.x		FP_SCR0(%a6),%fp2	# fp2 = X * 2**(-L)*(2/PI)
+
+#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+#--FLOATING POINT FORMAT, THE TWO FMOVE'S	FMOVE.L FP <--> N
+#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+#--(SIGN(INARG)*2**63	+	FP2) - SIGN(INARG)*2**63 WILL GIVE
+#--US THE DESIRED VALUE IN FLOATING POINT.
+	mov.l		%a1,%d2
+	swap		%d2
+	and.l		&0x80000000,%d2
+	or.l		&0x5F000000,%d2		# d2 = SIGN(INARG)*2**63 IN SGL
+	mov.l		%d2,TWOTO63(%a6)
+	fadd.s		TWOTO63(%a6),%fp2	# THE FRACTIONAL PART OF FP1 IS ROUNDED
+	fsub.s		TWOTO63(%a6),%fp2	# fp2 = N
+#	fint.x		%fp2
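+# (in C terms: n = (x + sgn*0x1p63) - sgn*0x1p63 leaves N integer-valued
+#  without an int round-trip; this relies on round-to-nearest being in
+#  effect for the adds.)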
+
+#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
+	mov.l		%d1,%d2			# d2 = L
+
+	add.l		&0x00003FFF,%d2		# BIASED EXP OF 2**L * (PI/2)
+	mov.w		%d2,FP_SCR0_EX(%a6)
+	mov.l		&0xC90FDAA2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)		# FP_SCR0 = 2**(L) * Piby2_1
+
+	add.l		&0x00003FDD,%d1
+	mov.w		%d1,FP_SCR1_EX(%a6)
+	mov.l		&0x85A308D3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)		# FP_SCR1 = 2**(L) * Piby2_2
+
+	mov.b		ENDFLAG(%a6),%d1
+
+#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+#--P2 = 2**(L) * Piby2_2
+	fmov.x		%fp2,%fp4		# fp4 = N
+	fmul.x		FP_SCR0(%a6),%fp4	# fp4 = W = N*P1
+	fmov.x		%fp2,%fp5		# fp5 = N
+	fmul.x		FP_SCR1(%a6),%fp5	# fp5 = w = N*P2
+	fmov.x		%fp4,%fp3		# fp3 = W = N*P1
+
+#--we want P+p = W+w  but  |p| <= half ulp of P
+#--Then, we need to compute  A := R-P   and  a := r-p
+	fadd.x		%fp5,%fp3		# fp3 = P
+	fsub.x		%fp3,%fp4		# fp4 = W-P
+
+	fsub.x		%fp3,%fp0		# fp0 = A := R - P
+	fadd.x		%fp5,%fp4		# fp4 = p = (W-P)+w
+
+	fmov.x		%fp0,%fp3		# fp3 = A
+	fsub.x		%fp4,%fp1		# fp1 = a := r - p
+
+#--Now we need to normalize (A,a) to  "new (R,r)" where R+r = A+a but
+#--|r| <= half ulp of R.
+	fadd.x		%fp1,%fp0		# fp0 = R := A+a
+#--No need to calculate r if this is the last loop
+	cmp.b		%d1,&0
+	bgt.w		SRESTORE
+
+#--Need to calculate r
+	fsub.x		%fp0,%fp3		# fp3 = A-R
+	fadd.x		%fp3,%fp1		# fp1 = r := (A-R)+a
+	bra.w		SLOOP
+
+SRESTORE:
+	fmov.l		%fp2,INT(%a6)
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		(%sp)+,&0x3c		# restore {fp2-fp5}
+
+	mov.l		ADJN(%a6),%d1
+	cmp.l		%d1,&4
+
+	blt.w		SINCONT
+	bra.w		SCCONT
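+# (one SLOOP iteration as a C sketch, mirroring the comments above;
+#  names are illustrative and P1/P2 are the two pieces of 2**L * (Pi/2):
+#
+#	void reduce_step(double *R, double *r, double N,
+#			 double P1, double P2)
+#	{
+#		double W = N * P1;	/* high product			*/
+#		double w = N * P2;	/* low product			*/
+#		double P = W + w;	/* P+p = W+w, |p| <= ulp(P)/2	*/
+#		double p = (W - P) + w;
+#		double A = *R - P;	/* A := R - P			*/
+#		double a = *r - p;	/* a := r - p			*/
+#		*R = A + a;		/* renormalize the pair		*/
+#		*r = (A - *R) + a;	/* new r; skipped on last pass	*/
+#	})
+#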
+
+#########################################################################
+# stan():  computes the tangent of a normalized input			#
+# stand(): computes the tangent of a denormalized input			#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = tan(X)							#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 3 ulp in 64 significant bits, i.e. #
+#	within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#	1. If |X| >= 15Pi or |X| < 2**(-40), go to 6.			#
+#									#
+#	2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let	#
+#		k = N mod 2, so in particular, k = 0 or 1.		#
+#									#
+#	3. If k is odd, go to 5.					#
+#									#
+#	4. (k is even) Tan(X) = tan(r) and tan(r) is approximated by a	#
+#		rational function U/V where				#
+#		U = r + r*s*(P1 + s*(P2 + s*P3)), and			#
+#		V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))),  s = r*r.	#
+#		Exit.							#
+#									#
+#	5. (k is odd) Tan(X) = -cot(r). Since tan(r) is approximated by #
+#		a rational function U/V where				#
+#		U = r + r*s*(P1 + s*(P2 + s*P3)), and			#
+#		V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r,	#
+#		-Cot(r) = -V/U. Exit.					#
+#									#
+#	6. If |X| > 1, go to 8.						#
+#									#
+#	7. (|X|<2**(-40)) Tan(X) = X. Exit.				#
+#									#
+#	8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back	#
+#		to 2.							#
+#									#
+#########################################################################
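+# As a reading aid, a minimal C sketch of the TAN steps above; the name
+# is illustrative, libm stands in for the U/V rational approximation,
+# and round-to-nearest is assumed:
+#
+#	#include <math.h>
+#	double fpsp_tan(double x)
+#	{
+#		if (fabs(x) < 0x1p-40)		/* step 7: tan(X) = X */
+#			return x;
+#		double n = nearbyint(x * M_2_PI); /* step 2: X = N(Pi/2)+r */
+#		double r = x - n * M_PI_2;
+#		double t = tan(r);		/* U/V above */
+#		return ((long)n & 1) ? -1.0/t : t; /* k odd: -cot(r) = -V/U */
+#	}
+#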
+
+TANQ4:
+	long		0x3EA0B759,0xF50F8688
+TANP3:
+	long		0xBEF2BAA5,0xA8924F04
+
+TANQ3:
+	long		0xBF346F59,0xB39BA65F,0x00000000,0x00000000
+
+TANP2:
+	long		0x3FF60000,0xE073D3FC,0x199C4A00,0x00000000
+
+TANQ2:
+	long		0x3FF90000,0xD23CD684,0x15D95FA1,0x00000000
+
+TANP1:
+	long		0xBFFC0000,0x8895A6C5,0xFB423BCA,0x00000000
+
+TANQ1:
+	long		0xBFFD0000,0xEEF57E0D,0xA84BC8CE,0x00000000
+
+INVTWOPI:
+	long		0x3FFC0000,0xA2F9836E,0x4E44152A,0x00000000
+
+TWOPI1:
+	long		0x40010000,0xC90FDAA2,0x00000000,0x00000000
+TWOPI2:
+	long		0x3FDF0000,0x85A308D4,0x00000000,0x00000000
+
+#--N*PI/2, -32 <= N <= 32, IN A LEADING TERM IN EXT. AND TRAILING
+#--TERM IN SGL. NOTE THAT PI IS 64-BIT LONG, THUS N*PI/2 IS AT
+#--MOST 69 BITS LONG.
+#	global		PITBL
+PITBL:
+	long		0xC0040000,0xC90FDAA2,0x2168C235,0x21800000
+	long		0xC0040000,0xC2C75BCD,0x105D7C23,0xA0D00000
+	long		0xC0040000,0xBC7EDCF7,0xFF523611,0xA1E80000
+	long		0xC0040000,0xB6365E22,0xEE46F000,0x21480000
+	long		0xC0040000,0xAFEDDF4D,0xDD3BA9EE,0xA1200000
+	long		0xC0040000,0xA9A56078,0xCC3063DD,0x21FC0000
+	long		0xC0040000,0xA35CE1A3,0xBB251DCB,0x21100000
+	long		0xC0040000,0x9D1462CE,0xAA19D7B9,0xA1580000
+	long		0xC0040000,0x96CBE3F9,0x990E91A8,0x21E00000
+	long		0xC0040000,0x90836524,0x88034B96,0x20B00000
+	long		0xC0040000,0x8A3AE64F,0x76F80584,0xA1880000
+	long		0xC0040000,0x83F2677A,0x65ECBF73,0x21C40000
+	long		0xC0030000,0xFB53D14A,0xA9C2F2C2,0x20000000
+	long		0xC0030000,0xEEC2D3A0,0x87AC669F,0x21380000
+	long		0xC0030000,0xE231D5F6,0x6595DA7B,0xA1300000
+	long		0xC0030000,0xD5A0D84C,0x437F4E58,0x9FC00000
+	long		0xC0030000,0xC90FDAA2,0x2168C235,0x21000000
+	long		0xC0030000,0xBC7EDCF7,0xFF523611,0xA1680000
+	long		0xC0030000,0xAFEDDF4D,0xDD3BA9EE,0xA0A00000
+	long		0xC0030000,0xA35CE1A3,0xBB251DCB,0x20900000
+	long		0xC0030000,0x96CBE3F9,0x990E91A8,0x21600000
+	long		0xC0030000,0x8A3AE64F,0x76F80584,0xA1080000
+	long		0xC0020000,0xFB53D14A,0xA9C2F2C2,0x1F800000
+	long		0xC0020000,0xE231D5F6,0x6595DA7B,0xA0B00000
+	long		0xC0020000,0xC90FDAA2,0x2168C235,0x20800000
+	long		0xC0020000,0xAFEDDF4D,0xDD3BA9EE,0xA0200000
+	long		0xC0020000,0x96CBE3F9,0x990E91A8,0x20E00000
+	long		0xC0010000,0xFB53D14A,0xA9C2F2C2,0x1F000000
+	long		0xC0010000,0xC90FDAA2,0x2168C235,0x20000000
+	long		0xC0010000,0x96CBE3F9,0x990E91A8,0x20600000
+	long		0xC0000000,0xC90FDAA2,0x2168C235,0x1F800000
+	long		0xBFFF0000,0xC90FDAA2,0x2168C235,0x1F000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x9F000000
+	long		0x40000000,0xC90FDAA2,0x2168C235,0x9F800000
+	long		0x40010000,0x96CBE3F9,0x990E91A8,0xA0600000
+	long		0x40010000,0xC90FDAA2,0x2168C235,0xA0000000
+	long		0x40010000,0xFB53D14A,0xA9C2F2C2,0x9F000000
+	long		0x40020000,0x96CBE3F9,0x990E91A8,0xA0E00000
+	long		0x40020000,0xAFEDDF4D,0xDD3BA9EE,0x20200000
+	long		0x40020000,0xC90FDAA2,0x2168C235,0xA0800000
+	long		0x40020000,0xE231D5F6,0x6595DA7B,0x20B00000
+	long		0x40020000,0xFB53D14A,0xA9C2F2C2,0x9F800000
+	long		0x40030000,0x8A3AE64F,0x76F80584,0x21080000
+	long		0x40030000,0x96CBE3F9,0x990E91A8,0xA1600000
+	long		0x40030000,0xA35CE1A3,0xBB251DCB,0xA0900000
+	long		0x40030000,0xAFEDDF4D,0xDD3BA9EE,0x20A00000
+	long		0x40030000,0xBC7EDCF7,0xFF523611,0x21680000
+	long		0x40030000,0xC90FDAA2,0x2168C235,0xA1000000
+	long		0x40030000,0xD5A0D84C,0x437F4E58,0x1FC00000
+	long		0x40030000,0xE231D5F6,0x6595DA7B,0x21300000
+	long		0x40030000,0xEEC2D3A0,0x87AC669F,0xA1380000
+	long		0x40030000,0xFB53D14A,0xA9C2F2C2,0xA0000000
+	long		0x40040000,0x83F2677A,0x65ECBF73,0xA1C40000
+	long		0x40040000,0x8A3AE64F,0x76F80584,0x21880000
+	long		0x40040000,0x90836524,0x88034B96,0xA0B00000
+	long		0x40040000,0x96CBE3F9,0x990E91A8,0xA1E00000
+	long		0x40040000,0x9D1462CE,0xAA19D7B9,0x21580000
+	long		0x40040000,0xA35CE1A3,0xBB251DCB,0xA1100000
+	long		0x40040000,0xA9A56078,0xCC3063DD,0xA1FC0000
+	long		0x40040000,0xAFEDDF4D,0xDD3BA9EE,0x21200000
+	long		0x40040000,0xB6365E22,0xEE46F000,0xA1480000
+	long		0x40040000,0xBC7EDCF7,0xFF523611,0x21E80000
+	long		0x40040000,0xC2C75BCD,0x105D7C23,0x20D00000
+	long		0x40040000,0xC90FDAA2,0x2168C235,0xA1800000
+
+	set		INARG,FP_SCR0
+
+	set		TWOTO63,L_SCR1
+	set		INT,L_SCR1
+	set		ENDFLAG,L_SCR2
+
+	global		stan
+stan:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FD78000		# |X| >= 2**(-40)?
+	bge.b		TANOK1
+	bra.w		TANSM
+TANOK1:
+	cmp.l		%d1,&0x4004BC7E		# |X| < 15 PI?
+	blt.b		TANMAIN
+	bra.w		REDUCEX
+
+TANMAIN:
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+	fmov.x		%fp0,%fp1
+	fmul.d		TWOBYPI(%pc),%fp1	# X*2/PI
+
+	lea.l		PITBL+0x200(%pc),%a1	# TABLE OF N*PI/2, N = -32,...,32
+
+	fmov.l		%fp1,%d1		# CONVERT TO INTEGER
+
+	asl.l		&4,%d1
+	add.l		%d1,%a1			# ADDRESS N*PIBY2 IN Y1, Y2
+
+	fsub.x		(%a1)+,%fp0		# X-Y1
+
+	fsub.s		(%a1),%fp0		# FP0 IS R = (X-Y1)-Y2
+
+	ror.l		&5,%d1
+	and.l		&0x80000000,%d1		# D0 WAS ODD IFF D0 < 0
+
+TANCONT:
+	fmovm.x		&0x0c,-(%sp)		# save fp2,fp3
+
+	cmp.l		%d1,&0
+	blt.w		NODD
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# S = R*R
+
+	fmov.d		TANQ4(%pc),%fp3
+	fmov.d		TANP3(%pc),%fp2
+
+	fmul.x		%fp1,%fp3		# SQ4
+	fmul.x		%fp1,%fp2		# SP3
+
+	fadd.d		TANQ3(%pc),%fp3		# Q3+SQ4
+	fadd.x		TANP2(%pc),%fp2		# P2+SP3
+
+	fmul.x		%fp1,%fp3		# S(Q3+SQ4)
+	fmul.x		%fp1,%fp2		# S(P2+SP3)
+
+	fadd.x		TANQ2(%pc),%fp3		# Q2+S(Q3+SQ4)
+	fadd.x		TANP1(%pc),%fp2		# P1+S(P2+SP3)
+
+	fmul.x		%fp1,%fp3		# S(Q2+S(Q3+SQ4))
+	fmul.x		%fp1,%fp2		# S(P1+S(P2+SP3))
+
+	fadd.x		TANQ1(%pc),%fp3		# Q1+S(Q2+S(Q3+SQ4))
+	fmul.x		%fp0,%fp2		# RS(P1+S(P2+SP3))
+
+	fmul.x		%fp3,%fp1		# S(Q1+S(Q2+S(Q3+SQ4)))
+
+	fadd.x		%fp2,%fp0		# R+RS(P1+S(P2+SP3))
+
+	fadd.s		&0x3F800000,%fp1	# 1+S(Q1+...)
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2,fp3
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fdiv.x		%fp1,%fp0		# last inst - possible exception set
+	bra		t_inx2
+
+NODD:
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp0,%fp0		# S = R*R
+
+	fmov.d		TANQ4(%pc),%fp3
+	fmov.d		TANP3(%pc),%fp2
+
+	fmul.x		%fp0,%fp3		# SQ4
+	fmul.x		%fp0,%fp2		# SP3
+
+	fadd.d		TANQ3(%pc),%fp3		# Q3+SQ4
+	fadd.x		TANP2(%pc),%fp2		# P2+SP3
+
+	fmul.x		%fp0,%fp3		# S(Q3+SQ4)
+	fmul.x		%fp0,%fp2		# S(P2+SP3)
+
+	fadd.x		TANQ2(%pc),%fp3		# Q2+S(Q3+SQ4)
+	fadd.x		TANP1(%pc),%fp2		# P1+S(P2+SP3)
+
+	fmul.x		%fp0,%fp3		# S(Q2+S(Q3+SQ4))
+	fmul.x		%fp0,%fp2		# S(P1+S(P2+SP3))
+
+	fadd.x		TANQ1(%pc),%fp3		# Q1+S(Q2+S(Q3+SQ4))
+	fmul.x		%fp1,%fp2		# RS(P1+S(P2+SP3))
+
+	fmul.x		%fp3,%fp0		# S(Q1+S(Q2+S(Q3+SQ4)))
+
+	fadd.x		%fp2,%fp1		# R+RS(P1+S(P2+SP3))
+	fadd.s		&0x3F800000,%fp0	# 1+S(Q1+...)
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2,fp3
+
+	fmov.x		%fp1,-(%sp)
+	eor.l		&0x80000000,(%sp)
+
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	fdiv.x		(%sp)+,%fp0		# last inst - possible exception set
+	bra		t_inx2
+
+TANBORS:
+#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+#--IF |X| < 2**(-40), RETURN X.
+	cmp.l		%d1,&0x3FFF8000
+	bgt.b		REDUCEX
+
+TANSM:
+	fmov.x		%fp0,-(%sp)
+	fmov.l		%d0,%fpcr		# restore users round mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		(%sp)+,%fp0		# last inst - possible exception set
+	bra		t_catch
+
+	global		stand
+#--TAN(X) = X FOR DENORMALIZED X
+stand:
+	bra		t_extdnrm
+
+#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
+REDUCEX:
+	fmovm.x		&0x3c,-(%sp)		# save {fp2-fp5}
+	mov.l		%d2,-(%sp)		# save d2
+	fmov.s		&0x00000000,%fp1	# fp1 = 0
+
+#--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
+#--there is a danger of unwanted overflow in first LOOP iteration.  In this
+#--case, reduce argument by one remainder step to make subsequent reduction
+#--safe.
+	cmp.l		%d1,&0x7ffeffff		# is arg dangerously large?
+	bne.b		LOOP			# no
+
+# yes; create 2**16383*PI/2
+	mov.w		&0x7ffe,FP_SCR0_EX(%a6)
+	mov.l		&0xc90fdaa2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+
+# create low half of 2**16383*PI/2 at FP_SCR1
+	mov.w		&0x7fdc,FP_SCR1_EX(%a6)
+	mov.l		&0x85a308d3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+
+	ftest.x		%fp0			# test sign of argument
+	fblt.w		red_neg
+
+	or.b		&0x80,FP_SCR0_EX(%a6)	# positive arg
+	or.b		&0x80,FP_SCR1_EX(%a6)
+red_neg:
+	fadd.x		FP_SCR0(%a6),%fp0	# high part of reduction is exact
+	fmov.x		%fp0,%fp1		# save high result in fp1
+	fadd.x		FP_SCR1(%a6),%fp0	# low part of reduction
+	fsub.x		%fp0,%fp1		# determine low component of result
+	fadd.x		FP_SCR1(%a6),%fp1	# fp0/fp1 are reduced argument.
+
+#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+#--integer quotient will be stored in N
+#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
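+#--Worked example (illustrative): X = 100.0 reduces with N = 64 (even),
+#--R = 100.0 - 32*PI ~ -0.53096, so TANCONT returns tan(R) ~ -0.58721.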
+LOOP:
+	fmov.x		%fp0,INARG(%a6)		# +-2**K * F, 1 <= F < 2
+	mov.w		INARG(%a6),%d1
+	mov.l		%d1,%a1			# save a copy of D0
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x00003FFF,%d1		# d0 = K
+	cmp.l		%d1,&28
+	ble.b		LASTLOOP
+CONTLOOP:
+	sub.l		&27,%d1			# d0 = L := K-27
+	mov.b		&0,ENDFLAG(%a6)
+	bra.b		WORK
+LASTLOOP:
+	clr.l		%d1			# d0 = L := 0
+	mov.b		&1,ENDFLAG(%a6)
+
+WORK:
+#--FIND THE REMAINDER OF (R,r) W.R.T.	2**L * (PI/2). L IS SO CHOSEN
+#--THAT	INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+#--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+	mov.l		&0x00003FFE,%d2		# BIASED EXP OF 2/PI
+	sub.l		%d1,%d2			# BIASED EXP OF 2**(-L)*(2/PI)
+
+	mov.l		&0xA2F9836E,FP_SCR0_HI(%a6)
+	mov.l		&0x4E44152A,FP_SCR0_LO(%a6)
+	mov.w		%d2,FP_SCR0_EX(%a6)	# FP_SCR0 = 2**(-L)*(2/PI)
+
+	fmov.x		%fp0,%fp2
+	fmul.x		FP_SCR0(%a6),%fp2	# fp2 = X * 2**(-L)*(2/PI)
+
+#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+#--FLOATING POINT FORMAT, THE TWO FMOVE'S	FMOVE.L FP <--> N
+#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+#--(SIGN(INARG)*2**63	+	FP2) - SIGN(INARG)*2**63 WILL GIVE
+#--US THE DESIRED VALUE IN FLOATING POINT.
+	mov.l		%a1,%d2
+	swap		%d2
+	and.l		&0x80000000,%d2
+	or.l		&0x5F000000,%d2		# d2 = SIGN(INARG)*2**63 IN SGL
+	mov.l		%d2,TWOTO63(%a6)
+	fadd.s		TWOTO63(%a6),%fp2	# THE FRACTIONAL PART OF FP2 IS ROUNDED
+	fsub.s		TWOTO63(%a6),%fp2	# fp2 = N
+#	fintrz.x	%fp2,%fp2
+
+#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
+	mov.l		%d1,%d2			# d2 = L
+
+	add.l		&0x00003FFF,%d2		# BIASED EXP OF 2**L * (PI/2)
+	mov.w		%d2,FP_SCR0_EX(%a6)
+	mov.l		&0xC90FDAA2,FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)		# FP_SCR0 = 2**(L) * Piby2_1
+
+	add.l		&0x00003FDD,%d1
+	mov.w		%d1,FP_SCR1_EX(%a6)
+	mov.l		&0x85A308D3,FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)		# FP_SCR1 = 2**(L) * Piby2_2
+
+	mov.b		ENDFLAG(%a6),%d1
+
+#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+#--P2 = 2**(L) * Piby2_2
+	fmov.x		%fp2,%fp4		# fp4 = N
+	fmul.x		FP_SCR0(%a6),%fp4	# fp4 = W = N*P1
+	fmov.x		%fp2,%fp5		# fp5 = N
+	fmul.x		FP_SCR1(%a6),%fp5	# fp5 = w = N*P2
+	fmov.x		%fp4,%fp3		# fp3 = W = N*P1
+
+#--we want P+p = W+w  but  |p| <= half ulp of P
+#--Then, we need to compute  A := R-P   and  a := r-p
+	fadd.x		%fp5,%fp3		# fp3 = P
+	fsub.x		%fp3,%fp4		# fp4 = W-P
+
+	fsub.x		%fp3,%fp0		# fp0 = A := R - P
+	fadd.x		%fp5,%fp4		# fp4 = p = (W-P)+w
+
+	fmov.x		%fp0,%fp3		# fp3 = A
+	fsub.x		%fp4,%fp1		# fp1 = a := r - p
+
+#--Now we need to normalize (A,a) to  "new (R,r)" where R+r = A+a but
+#--|r| <= half ulp of R.
+	fadd.x		%fp1,%fp0		# fp0 = R := A+a
+#--No need to calculate r if this is the last loop
+	cmp.b		%d1,&0
+	bgt.w		RESTORE
+
+#--Need to calculate r
+	fsub.x		%fp0,%fp3		# fp3 = A-R
+	fadd.x		%fp3,%fp1		# fp1 = r := (A-R)+a
+	bra.w		LOOP
+
+RESTORE:
+	fmov.l		%fp2,INT(%a6)
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		(%sp)+,&0x3c		# restore {fp2-fp5}
+
+	mov.l		INT(%a6),%d1
+	ror.l		&1,%d1
+
+	bra.w		TANCONT
+
+#########################################################################
+# satan():  computes the arctangent of a normalized number		#
+# satand(): computes the arctangent of a denormalized number		#
+#									#
+# INPUT	*************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = arctan(X)							#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 2 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#	Step 1. If |X| >= 16 or |X| < 1/16, go to Step 5.		#
+#									#
+#	Step 2. Let X = sgn * 2**k * 1.xxxxxxxx...x.			#
+#		Note that k = -4, -3,..., or 3.				#
+#		Define F = sgn * 2**k * 1.xxxx1, i.e. the first 5	#
+#		significant bits of X with a bit-1 attached at the 6-th	#
+#		bit position. Define u to be u = (X-F) / (1 + X*F).	#
+#									#
+#	Step 3. Approximate arctan(u) by a polynomial poly.		#
+#									#
+#	Step 4. Return arctan(F) + poly, arctan(F) is fetched from a	#
+#		table of values calculated beforehand. Exit.		#
+#									#
+#	Step 5. If |X| >= 16, go to Step 7.				#
+#									#
+#	Step 6. Approximate arctan(X) by an odd polynomial in X. Exit.	#
+#									#
+#	Step 7. Define X' = -1/X. Approximate arctan(X') by an odd	#
+#		polynomial in X'.					#
+#		Arctan(X) = sign(X)*Pi/2 + arctan(X'). Exit.		#
+#									#
+#########################################################################
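+
+# Worked example of the scheme above (illustrative only): for X = 1.0,
+# Step 2 picks F = 1.03125 and u = (1.0-F)/(1+F) ~ -0.0153846; the
+# table supplies atan(F) ~ 0.8007816 and the polynomial gives
+# atan(u) ~ -0.0153834, so atan(1.0) ~ 0.7853982 = PI/4.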
+
+ATANA3:	long		0xBFF6687E,0x314987D8
+ATANA2:	long		0x4002AC69,0x34A26DB3
+ATANA1:	long		0xBFC2476F,0x4E1DA28E
+
+ATANB6:	long		0x3FB34444,0x7F876989
+ATANB5:	long		0xBFB744EE,0x7FAF45DB
+ATANB4:	long		0x3FBC71C6,0x46940220
+ATANB3:	long		0xBFC24924,0x921872F9
+ATANB2:	long		0x3FC99999,0x99998FA9
+ATANB1:	long		0xBFD55555,0x55555555
+
+ATANC5:	long		0xBFB70BF3,0x98539E6A
+ATANC4:	long		0x3FBC7187,0x962D1D7D
+ATANC3:	long		0xBFC24924,0x827107B8
+ATANC2:	long		0x3FC99999,0x9996263E
+ATANC1:	long		0xBFD55555,0x55555536
+
+PPIBY2:	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+NPIBY2:	long		0xBFFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+PTINY:	long		0x00010000,0x80000000,0x00000000,0x00000000
+NTINY:	long		0x80010000,0x80000000,0x00000000,0x00000000
+
+ATANTBL:
+	long		0x3FFB0000,0x83D152C5,0x060B7A51,0x00000000
+	long		0x3FFB0000,0x8BC85445,0x65498B8B,0x00000000
+	long		0x3FFB0000,0x93BE4060,0x17626B0D,0x00000000
+	long		0x3FFB0000,0x9BB3078D,0x35AEC202,0x00000000
+	long		0x3FFB0000,0xA3A69A52,0x5DDCE7DE,0x00000000
+	long		0x3FFB0000,0xAB98E943,0x62765619,0x00000000
+	long		0x3FFB0000,0xB389E502,0xF9C59862,0x00000000
+	long		0x3FFB0000,0xBB797E43,0x6B09E6FB,0x00000000
+	long		0x3FFB0000,0xC367A5C7,0x39E5F446,0x00000000
+	long		0x3FFB0000,0xCB544C61,0xCFF7D5C6,0x00000000
+	long		0x3FFB0000,0xD33F62F8,0x2488533E,0x00000000
+	long		0x3FFB0000,0xDB28DA81,0x62404C77,0x00000000
+	long		0x3FFB0000,0xE310A407,0x8AD34F18,0x00000000
+	long		0x3FFB0000,0xEAF6B0A8,0x188EE1EB,0x00000000
+	long		0x3FFB0000,0xF2DAF194,0x9DBE79D5,0x00000000
+	long		0x3FFB0000,0xFABD5813,0x61D47E3E,0x00000000
+	long		0x3FFC0000,0x8346AC21,0x0959ECC4,0x00000000
+	long		0x3FFC0000,0x8B232A08,0x304282D8,0x00000000
+	long		0x3FFC0000,0x92FB70B8,0xD29AE2F9,0x00000000
+	long		0x3FFC0000,0x9ACF476F,0x5CCD1CB4,0x00000000
+	long		0x3FFC0000,0xA29E7630,0x4954F23F,0x00000000
+	long		0x3FFC0000,0xAA68C5D0,0x8AB85230,0x00000000
+	long		0x3FFC0000,0xB22DFFFD,0x9D539F83,0x00000000
+	long		0x3FFC0000,0xB9EDEF45,0x3E900EA5,0x00000000
+	long		0x3FFC0000,0xC1A85F1C,0xC75E3EA5,0x00000000
+	long		0x3FFC0000,0xC95D1BE8,0x28138DE6,0x00000000
+	long		0x3FFC0000,0xD10BF300,0x840D2DE4,0x00000000
+	long		0x3FFC0000,0xD8B4B2BA,0x6BC05E7A,0x00000000
+	long		0x3FFC0000,0xE0572A6B,0xB42335F6,0x00000000
+	long		0x3FFC0000,0xE7F32A70,0xEA9CAA8F,0x00000000
+	long		0x3FFC0000,0xEF888432,0x64ECEFAA,0x00000000
+	long		0x3FFC0000,0xF7170A28,0xECC06666,0x00000000
+	long		0x3FFD0000,0x812FD288,0x332DAD32,0x00000000
+	long		0x3FFD0000,0x88A8D1B1,0x218E4D64,0x00000000
+	long		0x3FFD0000,0x9012AB3F,0x23E4AEE8,0x00000000
+	long		0x3FFD0000,0x976CC3D4,0x11E7F1B9,0x00000000
+	long		0x3FFD0000,0x9EB68949,0x3889A227,0x00000000
+	long		0x3FFD0000,0xA5EF72C3,0x4487361B,0x00000000
+	long		0x3FFD0000,0xAD1700BA,0xF07A7227,0x00000000
+	long		0x3FFD0000,0xB42CBCFA,0xFD37EFB7,0x00000000
+	long		0x3FFD0000,0xBB303A94,0x0BA80F89,0x00000000
+	long		0x3FFD0000,0xC22115C6,0xFCAEBBAF,0x00000000
+	long		0x3FFD0000,0xC8FEF3E6,0x86331221,0x00000000
+	long		0x3FFD0000,0xCFC98330,0xB4000C70,0x00000000
+	long		0x3FFD0000,0xD6807AA1,0x102C5BF9,0x00000000
+	long		0x3FFD0000,0xDD2399BC,0x31252AA3,0x00000000
+	long		0x3FFD0000,0xE3B2A855,0x6B8FC517,0x00000000
+	long		0x3FFD0000,0xEA2D764F,0x64315989,0x00000000
+	long		0x3FFD0000,0xF3BF5BF8,0xBAD1A21D,0x00000000
+	long		0x3FFE0000,0x801CE39E,0x0D205C9A,0x00000000
+	long		0x3FFE0000,0x8630A2DA,0xDA1ED066,0x00000000
+	long		0x3FFE0000,0x8C1AD445,0xF3E09B8C,0x00000000
+	long		0x3FFE0000,0x91DB8F16,0x64F350E2,0x00000000
+	long		0x3FFE0000,0x97731420,0x365E538C,0x00000000
+	long		0x3FFE0000,0x9CE1C8E6,0xA0B8CDBA,0x00000000
+	long		0x3FFE0000,0xA22832DB,0xCADAAE09,0x00000000
+	long		0x3FFE0000,0xA746F2DD,0xB7602294,0x00000000
+	long		0x3FFE0000,0xAC3EC0FB,0x997DD6A2,0x00000000
+	long		0x3FFE0000,0xB110688A,0xEBDC6F6A,0x00000000
+	long		0x3FFE0000,0xB5BCC490,0x59ECC4B0,0x00000000
+	long		0x3FFE0000,0xBA44BC7D,0xD470782F,0x00000000
+	long		0x3FFE0000,0xBEA94144,0xFD049AAC,0x00000000
+	long		0x3FFE0000,0xC2EB4ABB,0x661628B6,0x00000000
+	long		0x3FFE0000,0xC70BD54C,0xE602EE14,0x00000000
+	long		0x3FFE0000,0xCD000549,0xADEC7159,0x00000000
+	long		0x3FFE0000,0xD48457D2,0xD8EA4EA3,0x00000000
+	long		0x3FFE0000,0xDB948DA7,0x12DECE3B,0x00000000
+	long		0x3FFE0000,0xE23855F9,0x69E8096A,0x00000000
+	long		0x3FFE0000,0xE8771129,0xC4353259,0x00000000
+	long		0x3FFE0000,0xEE57C16E,0x0D379C0D,0x00000000
+	long		0x3FFE0000,0xF3E10211,0xA87C3779,0x00000000
+	long		0x3FFE0000,0xF919039D,0x758B8D41,0x00000000
+	long		0x3FFE0000,0xFE058B8F,0x64935FB3,0x00000000
+	long		0x3FFF0000,0x8155FB49,0x7B685D04,0x00000000
+	long		0x3FFF0000,0x83889E35,0x49D108E1,0x00000000
+	long		0x3FFF0000,0x859CFA76,0x511D724B,0x00000000
+	long		0x3FFF0000,0x87952ECF,0xFF8131E7,0x00000000
+	long		0x3FFF0000,0x89732FD1,0x9557641B,0x00000000
+	long		0x3FFF0000,0x8B38CAD1,0x01932A35,0x00000000
+	long		0x3FFF0000,0x8CE7A8D8,0x301EE6B5,0x00000000
+	long		0x3FFF0000,0x8F46A39E,0x2EAE5281,0x00000000
+	long		0x3FFF0000,0x922DA7D7,0x91888487,0x00000000
+	long		0x3FFF0000,0x94D19FCB,0xDEDF5241,0x00000000
+	long		0x3FFF0000,0x973AB944,0x19D2A08B,0x00000000
+	long		0x3FFF0000,0x996FF00E,0x08E10B96,0x00000000
+	long		0x3FFF0000,0x9B773F95,0x12321DA7,0x00000000
+	long		0x3FFF0000,0x9D55CC32,0x0F935624,0x00000000
+	long		0x3FFF0000,0x9F100575,0x006CC571,0x00000000
+	long		0x3FFF0000,0xA0A9C290,0xD97CC06C,0x00000000
+	long		0x3FFF0000,0xA22659EB,0xEBC0630A,0x00000000
+	long		0x3FFF0000,0xA388B4AF,0xF6EF0EC9,0x00000000
+	long		0x3FFF0000,0xA4D35F10,0x61D292C4,0x00000000
+	long		0x3FFF0000,0xA60895DC,0xFBE3187E,0x00000000
+	long		0x3FFF0000,0xA72A51DC,0x7367BEAC,0x00000000
+	long		0x3FFF0000,0xA83A5153,0x0956168F,0x00000000
+	long		0x3FFF0000,0xA93A2007,0x7539546E,0x00000000
+	long		0x3FFF0000,0xAA9E7245,0x023B2605,0x00000000
+	long		0x3FFF0000,0xAC4C84BA,0x6FE4D58F,0x00000000
+	long		0x3FFF0000,0xADCE4A4A,0x606B9712,0x00000000
+	long		0x3FFF0000,0xAF2A2DCD,0x8D263C9C,0x00000000
+	long		0x3FFF0000,0xB0656F81,0xF22265C7,0x00000000
+	long		0x3FFF0000,0xB1846515,0x0F71496A,0x00000000
+	long		0x3FFF0000,0xB28AAA15,0x6F9ADA35,0x00000000
+	long		0x3FFF0000,0xB37B44FF,0x3766B895,0x00000000
+	long		0x3FFF0000,0xB458C3DC,0xE9630433,0x00000000
+	long		0x3FFF0000,0xB525529D,0x562246BD,0x00000000
+	long		0x3FFF0000,0xB5E2CCA9,0x5F9D88CC,0x00000000
+	long		0x3FFF0000,0xB692CADA,0x7ACA1ADA,0x00000000
+	long		0x3FFF0000,0xB736AEA7,0xA6925838,0x00000000
+	long		0x3FFF0000,0xB7CFAB28,0x7E9F7B36,0x00000000
+	long		0x3FFF0000,0xB85ECC66,0xCB219835,0x00000000
+	long		0x3FFF0000,0xB8E4FD5A,0x20A593DA,0x00000000
+	long		0x3FFF0000,0xB99F41F6,0x4AFF9BB5,0x00000000
+	long		0x3FFF0000,0xBA7F1E17,0x842BBE7B,0x00000000
+	long		0x3FFF0000,0xBB471285,0x7637E17D,0x00000000
+	long		0x3FFF0000,0xBBFABE8A,0x4788DF6F,0x00000000
+	long		0x3FFF0000,0xBC9D0FAD,0x2B689D79,0x00000000
+	long		0x3FFF0000,0xBD306A39,0x471ECD86,0x00000000
+	long		0x3FFF0000,0xBDB6C731,0x856AF18A,0x00000000
+	long		0x3FFF0000,0xBE31CAC5,0x02E80D70,0x00000000
+	long		0x3FFF0000,0xBEA2D55C,0xE33194E2,0x00000000
+	long		0x3FFF0000,0xBF0B10B7,0xC03128F0,0x00000000
+	long		0x3FFF0000,0xBF6B7A18,0xDACB778D,0x00000000
+	long		0x3FFF0000,0xBFC4EA46,0x63FA18F6,0x00000000
+	long		0x3FFF0000,0xC0181BDE,0x8B89A454,0x00000000
+	long		0x3FFF0000,0xC065B066,0xCFBF6439,0x00000000
+	long		0x3FFF0000,0xC0AE345F,0x56340AE6,0x00000000
+	long		0x3FFF0000,0xC0F22291,0x9CB9E6A7,0x00000000
+
+	set		X,FP_SCR0
+	set		XDCARE,X+2
+	set		XFRAC,X+4
+	set		XFRACLO,X+8
+
+	set		ATANF,FP_SCR1
+	set		ATANFHI,ATANF+4
+	set		ATANFLO,ATANF+8
+
+	global		satan
+#--ENTRY POINT FOR ATAN(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+satan:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1		# pack exp w/ upper 16 fraction
+	mov.w		4(%a0),%d1
+	fmov.x		%fp0,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FFB8000		# |X| >= 1/16?
+	bge.b		ATANOK1
+	bra.w		ATANSM
+
+ATANOK1:
+	cmp.l		%d1,&0x4002FFFF		# |X| < 16 ?
+	ble.b		ATANMAIN
+	bra.w		ATANBIG
+
+#--THE MOST LIKELY CASE, |X| IN [1/16, 16). WE USE TABLE TECHNIQUE
+#--THE IDEA IS ATAN(X) = ATAN(F) + ATAN( [X-F] / [1+XF] ).
+#--SO IF F IS CHOSEN TO BE CLOSE TO X AND ATAN(F) IS STORED IN
+#--A TABLE, ALL WE NEED IS TO APPROXIMATE ATAN(U) WHERE
+#--U = (X-F)/(1+XF) IS SMALL (REMEMBER F IS CLOSE TO X). IT IS
+#--TRUE THAT A DIVIDE IS NOW NEEDED, BUT THE APPROXIMATION FOR
+#--ATAN(U) IS A VERY SHORT POLYNOMIAL AND THE INDEXING TO
+#--FETCH F AND SAVING OF REGISTERS CAN ALL BE HIDDEN UNDER THE
+#--DIVIDE. IN THE END THIS METHOD IS MUCH FASTER THAN A TRADITIONAL
+#--ONE. NOTE ALSO THAT THE TRADITIONAL SCHEME THAT APPROXIMATES
+#--ATAN(X) DIRECTLY WILL NEED TO USE A RATIONAL APPROXIMATION
+#--(DIVISION NEEDED) ANYWAY BECAUSE A POLYNOMIAL APPROXIMATION
+#--WILL INVOLVE A VERY LONG POLYNOMIAL.
+
+#--NOW WE SEE X AS +-2^K * 1.BBBBBBB....B <- 1. + 63 BITS
+#--WE CHOSE F TO BE +-2^K * 1.BBBB1
+#--THAT IS IT MATCHES THE EXPONENT AND FIRST 5 BITS OF X, THE
+#--SIXTH BIT IS SET TO 1. SINCE K = -4, -3, ..., 3, THERE
+#--ARE ONLY 8 TIMES 16 = 2^7 = 128 |F|'S. SINCE ATAN(-|F|) IS
+#-- -ATAN(|F|), WE NEED TO STORE ONLY ATAN(|F|).
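+
+#--EXAMPLE (ILLUSTRATIVE): X = 1.25 = 2^0 * 1.0100...B GIVES
+#--F = 2^0 * 1.01001B = 1.28125; WITH K = 0 AND FRACTION BITS 0100,
+#--THE TABLE INDEX IS 16*(K+4) + 4 = 68, WHOSE ENTRY IS
+#--ATAN(1.28125) ~ 0.90807.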
+
+ATANMAIN:
+
+	and.l		&0xF8000000,XFRAC(%a6)	# FIRST 5 BITS
+	or.l		&0x04000000,XFRAC(%a6)	# SET 6-TH BIT TO 1
+	mov.l		&0x00000000,XFRACLO(%a6) # LOCATION OF X IS NOW F
+
+	fmov.x		%fp0,%fp1		# FP1 IS X
+	fmul.x		X(%a6),%fp1		# FP1 IS X*F, NOTE THAT X*F > 0
+	fsub.x		X(%a6),%fp0		# FP0 IS X-F
+	fadd.s		&0x3F800000,%fp1	# FP1 IS 1 + X*F
+	fdiv.x		%fp1,%fp0		# FP0 IS U = (X-F)/(1+X*F)
+
+#--WHILE THE DIVISION IS TAKING ITS TIME, WE FETCH ATAN(|F|)
+#--CREATE ATAN(F) AND STORE IT IN ATANF, AND
+#--SAVE REGISTERS FP2.
+
+	mov.l		%d2,-(%sp)		# SAVE d2 TEMPORARILY
+	mov.l		%d1,%d2			# THE EXP AND 16 BITS OF X
+	and.l		&0x00007800,%d1		# 4 VARYING BITS OF F'S FRACTION
+	and.l		&0x7FFF0000,%d2		# EXPONENT OF F
+	sub.l		&0x3FFB0000,%d2		# K+4
+	asr.l		&1,%d2
+	add.l		%d2,%d1			# THE 7 BITS IDENTIFYING F
+	asr.l		&7,%d1			# INDEX INTO TBL OF ATAN(|F|)
+	lea		ATANTBL(%pc),%a1
+	add.l		%d1,%a1			# ADDRESS OF ATAN(|F|)
+	mov.l		(%a1)+,ATANF(%a6)
+	mov.l		(%a1)+,ATANFHI(%a6)
+	mov.l		(%a1)+,ATANFLO(%a6)	# ATANF IS NOW ATAN(|F|)
+	mov.l		X(%a6),%d1		# LOAD SIGN AND EXPO. AGAIN
+	and.l		&0x80000000,%d1		# SIGN(F)
+	or.l		%d1,ATANF(%a6)		# ATANF IS NOW SIGN(F)*ATAN(|F|)
+	mov.l		(%sp)+,%d2		# RESTORE d2
+
+#--THAT'S ALL I HAVE TO DO FOR NOW,
+#--BUT ALAS, THE DIVIDE IS STILL CRANKING!
+
+#--U IN FP0, WE ARE NOW READY TO COMPUTE ATAN(U) AS
+#--U + A1*U*V*(A2 + V*(A3 + V)), V = U*U
+#--THE POLYNOMIAL MAY LOOK STRANGE, BUT IS NEVERTHELESS CORRECT.
+#--THE NATURAL FORM IS U + U*V*(A1 + V*(A2 + V*A3))
+#--WHAT WE HAVE HERE IS MERELY	A1 = A3, A2 = A1/A3, A3 = A2/A3.
+#--THE REASON FOR THIS REARRANGEMENT IS TO MAKE THE INDEPENDENT
+#--PARTS A1*U*V AND (A2 + ... STUFF) MORE LOAD-BALANCED
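+#--CHECK (ILLUSTRATIVE): EXPANDING U + A1*U*V*(A2 + V*(A3 + V)) GIVES
+#--U + U*V*(A1*A2 + V*(A1*A3 + V*A1)); MATCHING THE NATURAL FORM WITH
+#--COEFFICIENTS a1,a2,a3 REQUIRES A1 = a3, A2 = a1/a3, A3 = a2/a3,
+#--WHICH IS THE RELATION CLAIMED ABOVE.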
+
+	fmovm.x		&0x04,-(%sp)		# save fp2
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1
+	fmov.d		ATANA3(%pc),%fp2
+	fadd.x		%fp1,%fp2		# A3+V
+	fmul.x		%fp1,%fp2		# V*(A3+V)
+	fmul.x		%fp0,%fp1		# U*V
+	fadd.d		ATANA2(%pc),%fp2	# A2+V*(A3+V)
+	fmul.d		ATANA1(%pc),%fp1	# A1*U*V
+	fmul.x		%fp2,%fp1		# A1*U*V*(A2+V*(A3+V))
+	fadd.x		%fp1,%fp0		# ATAN(U), FP1 RELEASED
+
+	fmovm.x		(%sp)+,&0x20		# restore fp2
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	fadd.x		ATANF(%a6),%fp0		# ATAN(X)
+	bra		t_inx2
+
+ATANBORS:
+#--|X| IS IN d0 IN COMPACT FORM. FP1, d0 SAVED.
+#--FP0 IS X AND |X| <= 1/16 OR |X| >= 16.
+	cmp.l		%d1,&0x3FFF8000
+	bgt.w		ATANBIG			# I.E. |X| >= 16
+
+ATANSM:
+#--|X| <= 1/16
+#--IF |X| < 2^(-40), RETURN X AS ANSWER. OTHERWISE, APPROXIMATE
+#--ATAN(X) BY X + X*Y*(B1+Y*(B2+Y*(B3+Y*(B4+Y*(B5+Y*B6)))))
+#--WHICH IS X + X*Y*( [B1+Z*(B3+Z*B5)] + [Y*(B2+Z*(B4+Z*B6))] )
+#--WHERE Y = X*X, AND Z = Y*Y.
+
+	cmp.l		%d1,&0x3FD78000
+	blt.w		ATANTINY
+
+#--COMPUTE POLYNOMIAL
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmul.x		%fp0,%fp0		# FP0 IS Y = X*X
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS Z = Y*Y
+
+	fmov.d		ATANB6(%pc),%fp2
+	fmov.d		ATANB5(%pc),%fp3
+
+	fmul.x		%fp1,%fp2		# Z*B6
+	fmul.x		%fp1,%fp3		# Z*B5
+
+	fadd.d		ATANB4(%pc),%fp2	# B4+Z*B6
+	fadd.d		ATANB3(%pc),%fp3	# B3+Z*B5
+
+	fmul.x		%fp1,%fp2		# Z*(B4+Z*B6)
+	fmul.x		%fp3,%fp1		# Z*(B3+Z*B5)
+
+	fadd.d		ATANB2(%pc),%fp2	# B2+Z*(B4+Z*B6)
+	fadd.d		ATANB1(%pc),%fp1	# B1+Z*(B3+Z*B5)
+
+	fmul.x		%fp0,%fp2		# Y*(B2+Z*(B4+Z*B6))
+	fmul.x		X(%a6),%fp0		# X*Y
+
+	fadd.x		%fp2,%fp1		# [B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]
+
+	fmul.x		%fp1,%fp0		# X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))])
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	fadd.x		X(%a6),%fp0
+	bra		t_inx2
+
+ATANTINY:
+#--|X| < 2^(-40), ATAN(X) = X
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0		# last inst - possible exception set
+
+	bra		t_catch
+
+ATANBIG:
+#--IF |X| > 2^(100), RETURN	SIGN(X)*(PI/2 - TINY). OTHERWISE,
+#--RETURN SIGN(X)*PI/2 + ATAN(-1/X).
+	cmp.l		%d1,&0x40638000
+	bgt.w		ATANHUGE
+
+#--APPROXIMATE ATAN(-1/X) BY
+#--X'+X'*Y*(C1+Y*(C2+Y*(C3+Y*(C4+Y*C5)))), X' = -1/X, Y = X'*X'
+#--THIS CAN BE RE-WRITTEN AS
+#--X'+X'*Y*( [C1+Z*(C3+Z*C5)] + [Y*(C2+Z*C4)] ), Z = Y*Y.
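+
+#--WORKED EXAMPLE (ILLUSTRATIVE): FOR X = 20.0,
+#--ATAN(20.0) = PI/2 + ATAN(-1/20) ~ 1.5707963 - 0.0499584 = 1.5208379.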
+
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmov.s		&0xBF800000,%fp1	# LOAD -1
+	fdiv.x		%fp0,%fp1		# FP1 IS -1/X
+
+#--DIVIDE IS STILL CRANKING
+
+	fmov.x		%fp1,%fp0		# FP0 IS X'
+	fmul.x		%fp0,%fp0		# FP0 IS Y = X'*X'
+	fmov.x		%fp1,X(%a6)		# X IS REALLY X'
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS Z = Y*Y
+
+	fmov.d		ATANC5(%pc),%fp3
+	fmov.d		ATANC4(%pc),%fp2
+
+	fmul.x		%fp1,%fp3		# Z*C5
+	fmul.x		%fp1,%fp2		# Z*C4
+
+	fadd.d		ATANC3(%pc),%fp3	# C3+Z*C5
+	fadd.d		ATANC2(%pc),%fp2	# C2+Z*C4
+
+	fmul.x		%fp3,%fp1		# Z*(C3+Z*C5), FP3 RELEASED
+	fmul.x		%fp0,%fp2		# Y*(C2+Z*C4)
+
+	fadd.d		ATANC1(%pc),%fp1	# C1+Z*(C3+Z*C5)
+	fmul.x		X(%a6),%fp0		# X'*Y
+
+	fadd.x		%fp2,%fp1		# [Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)]
+
+	fmul.x		%fp1,%fp0		# X'*Y*([C1+Z*(C3+Z*C5)]
+#					...	+[Y*(C2+Z*C4)])
+	fadd.x		X(%a6),%fp0
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	tst.b		(%a0)
+	bpl.b		pos_big
+
+neg_big:
+	fadd.x		NPIBY2(%pc),%fp0
+	bra		t_minx2
+
+pos_big:
+	fadd.x		PPIBY2(%pc),%fp0
+	bra		t_pinx2
+
+ATANHUGE:
+#--RETURN SIGN(X)*(PIBY2 - TINY) = SIGN(X)*PIBY2 - SIGN(X)*TINY
+	tst.b		(%a0)
+	bpl.b		pos_huge
+
+neg_huge:
+	fmov.x		NPIBY2(%pc),%fp0
+	fmov.l		%d0,%fpcr
+	fadd.x		PTINY(%pc),%fp0
+	bra		t_minx2
+
+pos_huge:
+	fmov.x		PPIBY2(%pc),%fp0
+	fmov.l		%d0,%fpcr
+	fadd.x		NTINY(%pc),%fp0
+	bra		t_pinx2
+
+	global		satand
+#--ENTRY POINT FOR ATAN(X) FOR DENORMALIZED ARGUMENT
+satand:
+	bra		t_extdnrm
+
+#########################################################################
+# sasin():  computes the inverse sine of a normalized input		#
+# sasind(): computes the inverse sine of a denormalized input		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = arcsin(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	ASIN								#
+#	1. If |X| >= 1, go to 3.					#
+#									#
+#	2. (|X| < 1) Calculate asin(X) by				#
+#		z := sqrt( [1-X][1+X] )					#
+#		asin(X) = atan( x / z ).				#
+#		Exit.							#
+#									#
+#	3. If |X| > 1, go to 5.						#
+#									#
+#	4. (|X| = 1) sgn := sign(X), return asin(X) := sgn * Pi/2. Exit.#
+#									#
+#	5. (|X| > 1) Generate an invalid operation by 0 * infinity.	#
+#		Exit.							#
+#									#
+#########################################################################
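+
+# Worked example of step 2 (illustrative): asin(0.5) =
+# atan( 0.5 / sqrt(0.75) ) = atan(0.5773503) = PI/6 ~ 0.5235988.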
+
+	global		sasin
+sasin:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1		# pack exp w/ upper 16 fraction
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x3FFF8000
+	bge.b		ASINBIG
+
+# This catch is added here for the '060 QSP. Originally, the call to
+# satan() would handle this case by causing the exception which would
+# not be caught until gen_except(). Now, with the exceptions being
+# detected inside of satan(), the exception would have been handled there
+# instead of inside sasin() as expected.
+	cmp.l		%d1,&0x3FD78000
+	blt.w		ASINTINY
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--ASIN(X) = ATAN( X / SQRT( (1-X)(1+X) ) )
+
+ASINMAIN:
+	fmov.s		&0x3F800000,%fp1
+	fsub.x		%fp0,%fp1		# 1-X
+	fmovm.x		&0x4,-(%sp)		#  {fp2}
+	fmov.s		&0x3F800000,%fp2
+	fadd.x		%fp0,%fp2		# 1+X
+	fmul.x		%fp2,%fp1		# (1+X)(1-X)
+	fmovm.x		(%sp)+,&0x20		#  {fp2}
+	fsqrt.x		%fp1			# SQRT([1-X][1+X])
+	fdiv.x		%fp1,%fp0		# X/SQRT([1-X][1+X])
+	fmovm.x		&0x01,-(%sp)		# save X/SQRT(...)
+	lea		(%sp),%a0		# pass ptr to X/SQRT(...)
+	bsr		satan
+	add.l		&0xc,%sp		# clear X/SQRT(...) from stack
+	bra		t_inx2
+
+ASINBIG:
+	fabs.x		%fp0			# |X|
+	fcmp.s		%fp0,&0x3F800000
+	fbgt		t_operr			# cause an operr exception
+
+#--|X| = 1, ASIN(X) = +- PI/2.
+ASINONE:
+	fmov.x		PIBY2(%pc),%fp0
+	mov.l		(%a0),%d1
+	and.l		&0x80000000,%d1		# SIGN BIT OF X
+	or.l		&0x3F800000,%d1		# +-1 IN SGL FORMAT
+	mov.l		%d1,-(%sp)		# push SIGN(X) IN SGL-FMT
+	fmov.l		%d0,%fpcr
+	fmul.s		(%sp)+,%fp0
+	bra		t_inx2
+
+#--|X| < 2^(-40), ASIN(X) = X
+ASINTINY:
+	fmov.l		%d0,%fpcr		# restore users rnd mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		(%a0),%fp0		# last inst - possible exception
+	bra		t_catch
+
+	global		sasind
+#--ASIN(X) = X FOR DENORMALIZED X
+sasind:
+	bra		t_extdnrm
+
+#########################################################################
+# sacos():  computes the inverse cosine of a normalized input		#
+# sacosd(): computes the inverse cosine of a denormalized input		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = arccos(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#	ACOS								#
+#	1. If |X| >= 1, go to 3.					#
+#									#
+#	2. (|X| < 1) Calculate acos(X) by				#
+#		z := (1-X) / (1+X)					#
+#		acos(X) = 2 * atan( sqrt(z) ).				#
+#		Exit.							#
+#									#
+#	3. If |X| > 1, go to 5.						#
+#									#
+#	4. (|X| = 1) If X > 0, return 0. Otherwise, return Pi. Exit.	#
+#									#
+#	5. (|X| > 1) Generate an invalid operation by 0 * infinity.	#
+#		Exit.							#
+#									#
+#########################################################################
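+
+# Worked example of step 2 (illustrative): acos(0.5) =
+# 2 * atan( sqrt(0.5/1.5) ) = 2 * atan(0.5773503) = PI/3 ~ 1.0471976.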
+
+	global		sacos
+sacos:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1		# pack exp w/ upper 16 fraction
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x3FFF8000
+	bge.b		ACOSBIG
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--ACOS(X) = 2 * ATAN(	SQRT( (1-X)/(1+X) ) )
+
+ACOSMAIN:
+	fmov.s		&0x3F800000,%fp1
+	fadd.x		%fp0,%fp1		# 1+X
+	fneg.x		%fp0			# -X
+	fadd.s		&0x3F800000,%fp0	# 1-X
+	fdiv.x		%fp1,%fp0		# (1-X)/(1+X)
+	fsqrt.x		%fp0			# SQRT((1-X)/(1+X))
+	mov.l		%d0,-(%sp)		# save original users fpcr
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save SQRT(...) to stack
+	lea		(%sp),%a0		# pass ptr to sqrt
+	bsr		satan			# ATAN(SQRT([1-X]/[1+X]))
+	add.l		&0xc,%sp		# clear SQRT(...) from stack
+
+	fmov.l		(%sp)+,%fpcr		# restore users round prec,mode
+	fadd.x		%fp0,%fp0		# 2 * ATAN( STUFF )
+	bra		t_pinx2
+
+ACOSBIG:
+	fabs.x		%fp0
+	fcmp.s		%fp0,&0x3F800000
+	fbgt		t_operr			# cause an operr exception
+
+#--|X| = 1, ACOS(X) = 0 OR PI
+	tst.b		(%a0)			# is X positive or negative?
+	bpl.b		ACOSP1
+
+#--X = -1
+# Returns PI and inexact exception
+ACOSM1:
+	fmov.x		PI(%pc),%fp0		# load PI
+	fmov.l		%d0,%fpcr		# load round mode,prec
+	fadd.s		&0x00800000,%fp0	# add a small value
+	bra		t_pinx2
+
+ACOSP1:
+	bra		ld_pzero		# answer is positive zero
+
+	global		sacosd
+#--ACOS(X) = PI/2 FOR DENORMALIZED X
+sacosd:
+	fmov.l		%d0,%fpcr		# load user's rnd mode/prec
+	fmov.x		PIBY2(%pc),%fp0
+	bra		t_pinx2
+
+#########################################################################
+# setox():    computes the exponential for a normalized input		#
+# setoxd():   computes the exponential for a denormalized input		#
+# setoxm1():  computes the exponential minus 1 for a normalized input	#
+# setoxm1d(): computes the exponential minus 1 for a denormalized input	#
+#									#
+# INPUT	*************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = exp(X) or exp(X)-1					#
+#									#
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 0.85 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM and IMPLEMENTATION **************************************** #
+#									#
+#	setoxd								#
+#	------								#
+#	Step 1.	Set ans := 1.0						#
+#									#
+#	Step 2.	Return	ans := ans + sign(X)*2^(-126). Exit.		#
+#	Notes:	This will always generate one exception -- inexact.	#
+#									#
+#									#
+#	setox								#
+#	-----								#
+#									#
+#	Step 1.	Filter out extreme cases of input argument.		#
+#		1.1	If |X| >= 2^(-65), go to Step 1.3.		#
+#		1.2	Go to Step 7.					#
+#		1.3	If |X| < 16380 log(2), go to Step 2.		#
+#		1.4	Go to Step 8.					#
+#	Notes:	The usual case should take the branches 1.1 -> 1.3 -> 2.#
+#		To avoid the use of floating-point comparisons, a	#
+#		compact representation of |X| is used. This format is a	#
+#		32-bit integer, the upper (more significant) 16 bits	#
+#		are the sign and biased exponent field of |X|; the	#
+#		lower 16 bits are the 16 most significant fraction	#
+#		(including the explicit bit) bits of |X|. Consequently,	#
+#		the comparisons in Steps 1.1 and 1.3 can be performed	#
+#		by integer comparison. Note also that the constant	#
+#		16380 log(2) used in Step 1.3 is also in the compact	#
+#		form. Thus taking the branch to Step 2 guarantees	#
+#		|X| < 16380 log(2). There is no harm in having a small	#
+#		number of cases where |X| is less than, but close to,	#
+#		16380 log(2) and the branch to Step 9 is taken.		#
+#									#
+#	Step 2.	Calculate N = round-to-nearest-int( X * 64/log2 ).	#
+#		2.1	Set AdjFlag := 0 (indicates the branch 1.3 -> 2 #
+#			was taken)					#
+#		2.2	N := round-to-nearest-integer( X * 64/log2 ).	#
+#		2.3	Calculate	J = N mod 64; so J = 0,1,2,..., #
+#			or 63.						#
+#		2.4	Calculate	M = (N - J)/64; so N = 64M + J.	#
+#		2.5	Calculate the address of the stored value of	#
+#			2^(J/64).					#
+#		2.6	Create the value Scale = 2^M.			#
+#	Notes:	The calculation in 2.2 is really performed by		#
+#			Z := X * constant				#
+#			N := round-to-nearest-integer(Z)		#
+#		where							#
+#			constant := single-precision( 64/log 2 ).	#
+#									#
+#		Using a single-precision constant avoids memory		#
+#		access. Another effect of using a single-precision	#
+#		"constant" is that the calculated value Z is		#
+#									#
+#			Z = X*(64/log2)*(1+eps), |eps| <= 2^(-24).	#
+#									#
+#		This error has to be considered later in Steps 3 and 4.	#
+#									#
+#	Step 3.	Calculate X - N*log2/64.				#
+#		3.1	R := X + N*L1,					#
+#				where L1 := single-precision(-log2/64).	#
+#		3.2	R := R + N*L2,					#
+#				L2 := extended-precision(-log2/64 - L1).#
+#	Notes:	a) The way L1 and L2 are chosen ensures L1+L2		#
+#		approximate the value -log2/64 to 88 bits of accuracy.	#
+#		b) N*L1 is exact because N is no longer than 22 bits	#
+#		and L1 is no longer than 24 bits.			#
+#		c) The calculation X+N*L1 is also exact due to		#
+#		cancellation. Thus, R is practically X+N(L1+L2) to full	#
+#		64 bits.						#
+#		d) It is important to estimate how large can |R| be	#
+#		after Step 3.2.						#
+#									#
+#		N = rnd-to-int( X*64/log2 (1+eps) ), |eps|<=2^(-24)	#
+#		X*64/log2 (1+eps)	=	N + f,	|f| <= 0.5	#
+#		X*64/log2 - N	=	f - eps*X 64/log2		#
+#		X - N*log2/64	=	f*log2/64 - eps*X		#
+#									#
+#									#
+#		Now |X| <= 16446 log2, thus				#
+#									#
+#			|X - N*log2/64| <= (0.5 + 16446/2^(18))*log2/64	#
+#					<= 0.57 log2/64.		#
+#		 This bound will be used in Step 4.			#
+#									#
+#	Step 4.	Approximate exp(R)-1 by a polynomial			#
+#		p = R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))	#
+#	Notes:	a) In order to reduce memory access, the coefficients	#
+#		are made as "short" as possible: A1 (which is 1/2), A4	#
+#		and A5 are single precision; A2 and A3 are double	#
+#		precision.						#
+#		b) Even with the restrictions above,			#
+#		   |p - (exp(R)-1)| < 2^(-68.8) for all |R| <= 0.0062.	#
+#		Note that 0.0062 is slightly bigger than 0.57 log2/64.	#
+#		c) To fully utilize the pipeline, p is separated into	#
+#		two independent pieces of roughly equal complexities	#
+#			p = [ R + R*S*(A2 + S*A4) ]	+		#
+#				[ S*(A1 + S*(A3 + S*A5)) ]		#
+#		where S = R*R.						#
+#									#
+#	Step 5.	Compute 2^(J/64)*exp(R) = 2^(J/64)*(1+p) by		#
+#				ans := T + ( T*p + t)			#
+#		where T and t are the stored values for 2^(J/64).	#
+#	Notes:	2^(J/64) is stored as T and t where T+t approximates	#
+#		2^(J/64) to roughly 85 bits; T is in extended precision	#
+#		and t is in single precision. Note also that T is	#
+#		rounded to 62 bits so that the last two bits of T are	#
+#		zero. The reason for such a special form is that T-1,	#
+#		T-2, and T-8 will all be exact --- a property that will	#
+#		give much more accurate computation of the function	#
+#		EXPM1.							#
+#									#
+#	Step 6.	Reconstruction of exp(X)				#
+#			exp(X) = 2^M * 2^(J/64) * exp(R).		#
+#		6.1	If AdjFlag = 0, go to 6.3			#
+#		6.2	ans := ans * AdjScale				#
+#		6.3	Restore the user FPCR				#
+#		6.4	Return ans := ans * Scale. Exit.		#
+#	Notes:	If AdjFlag = 0, we have X = Mlog2 + Jlog2/64 + R,	#
+#		|M| <= 16380, and Scale = 2^M. Moreover, exp(X) will	#
+#		neither overflow nor underflow. If AdjFlag = 1, that	#
+#		means that						#
+#			X = (M1+M)log2 + Jlog2/64 + R, |M1+M| >= 16380.	#
+#		Hence, exp(X) may overflow or underflow or neither.	#
+#		When that is the case, AdjScale = 2^(M1) where M1 is	#
+#		approximately M. Thus 6.2 will never cause		#
+#		over/underflow. Possible exception in 6.4 is overflow	#
+#		or underflow. The inexact exception is not generated in	#
+#		6.4. Although one can argue that the inexact flag	#
+#		should always be raised, simulating that exception	#
+#		costs more than the flag is worth in practical use.	#
+#									#
+#	Step 7.	Return 1 + X.						#
+#		7.1	ans := X					#
+#		7.2	Restore user FPCR.				#
+#		7.3	Return ans := 1 + ans. Exit			#
+#	Notes:	For non-zero X, the inexact exception will always be	#
+#		raised by 7.3. That is the only exception raised by 7.3.#
+#		Note also that we use the FMOVEM instruction to move X	#
+#		in Step 7.1 to avoid unnecessary trapping. (Although	#
+#		the FMOVEM may not seem relevant since X is normalized,	#
+#		the precaution will be useful in the library version of	#
+#		this code where the separate entry for denormalized	#
+#		inputs will be done away with.)				#
+#									#
+#	Step 8.	Handle exp(X) where |X| >= 16380log2.			#
+#		8.1	If |X| > 16480 log2, go to Step 9.		#
+#		(mimic 2.2 - 2.6)					#
+#		8.2	N := round-to-integer( X * 64/log2 )		#
+#		8.3	Calculate J = N mod 64, J = 0,1,...,63		#
+#		8.4	K := (N-J)/64, M1 := truncate(K/2), M = K-M1,	#
+#			AdjFlag := 1.					#
+#		8.5	Calculate the address of the stored value	#
+#			2^(J/64).					#
+#		8.6	Create the values Scale = 2^M, AdjScale = 2^M1.	#
+#		8.7	Go to Step 3.					#
+#	Notes:	Refer to notes for 2.2 - 2.6.				#
+#									#
+#	Step 9.	Handle exp(X), |X| > 16480 log2.			#
+#		9.1	If X < 0, go to 9.3				#
+#		9.2	ans := Huge, go to 9.4				#
+#		9.3	ans := Tiny.					#
+#		9.4	Restore user FPCR.				#
+#		9.5	Return ans := ans * ans. Exit.			#
+#	Notes:	Exp(X) will surely overflow or underflow, depending on	#
+#		X's sign. "Huge" and "Tiny" are respectively large/tiny	#
+#		extended-precision numbers whose square over/underflow	#
+#		with an inexact result. Thus, 9.5 always raises the	#
+#		inexact together with either overflow or underflow.	#
+#									#
+#	setoxm1d							#
+#	--------							#
+#									#
+#	Step 1.	Set ans := 0						#
+#									#
+#	Step 2.	Return	ans := X + ans. Exit.				#
+#	Notes:	This will return X with the appropriate rounding	#
+#		 precision prescribed by the user FPCR.			#
+#									#
+#	setoxm1								#
+#	-------								#
+#									#
+#	Step 1.	Check |X|						#
+#		1.1	If |X| >= 1/4, go to Step 1.3.			#
+#		1.2	Go to Step 7.					#
+#		1.3	If |X| < 70 log(2), go to Step 2.		#
+#		1.4	Go to Step 10.					#
+#	Notes:	The usual case should take the branches 1.1 -> 1.3 -> 2.#
+#		However, it is conceivable |X| can be small very often	#
+#		because EXPM1 is intended to evaluate exp(X)-1		#
+#		accurately when |X| is small. For further details on	#
+#		the comparisons, see the notes on Step 1 of setox.	#
+#									#
+#	Step 2.	Calculate N = round-to-nearest-int( X * 64/log2 ).	#
+#		2.1	N := round-to-nearest-integer( X * 64/log2 ).	#
+#		2.2	Calculate	J = N mod 64; so J = 0,1,2,..., #
+#			or 63.						#
+#		2.3	Calculate	M = (N - J)/64; so N = 64M + J.	#
+#		2.4	Calculate the address of the stored value of	#
+#			2^(J/64).					#
+#		2.5	Create the values Sc = 2^M and			#
+#			OnebySc := -2^(-M).				#
+#	Notes:	See the notes on Step 2 of setox.			#
+#									#
+#	Step 3.	Calculate X - N*log2/64.				#
+#		3.1	R := X + N*L1,					#
+#				where L1 := single-precision(-log2/64).	#
+#		3.2	R := R + N*L2,					#
+#				L2 := extended-precision(-log2/64 - L1).#
+#	Notes:	Applying the analysis of Step 3 of setox in this case	#
+#		shows that |R| <= 0.0055 (note that |X| <= 70 log2 in	#
+#		this case).						#
+#									#
+#	Step 4.	Approximate exp(R)-1 by a polynomial			#
+#			p = R+R*R*(A1+R*(A2+R*(A3+R*(A4+R*(A5+R*A6)))))	#
+#	Notes:	a) In order to reduce memory access, the coefficients	#
+#		are made as "short" as possible: A1 (which is 1/2), A5	#
+#		and A6 are single precision; A2, A3 and A4 are double	#
+#		precision.						#
+#		b) Even with the restriction above,			#
+#			|p - (exp(R)-1)| <	|R| * 2^(-72.7)		#
+#		for all |R| <= 0.0055.					#
+#		c) To fully utilize the pipeline, p is separated into	#
+#		two independent pieces of roughly equal complexity	#
+#			p = [ R*S*(A2 + S*(A4 + S*A6)) ]	+	#
+#				[ R + S*(A1 + S*(A3 + S*A5)) ]		#
+#		where S = R*R.						#
+#									#
+#	Step 5.	Compute 2^(J/64)*p by					#
+#				p := T*p				#
+#		where T and t are the stored values for 2^(J/64).	#
+#	Notes:	2^(J/64) is stored as T and t where T+t approximates	#
+#		2^(J/64) to roughly 85 bits; T is in extended precision	#
+#		and t is in single precision. Note also that T is	#
+#		rounded to 62 bits so that the last two bits of T are	#
+#		zero. The reason for such a special form is that T-1,	#
+#		T-2, and T-8 will all be exact --- a property that will	#
+#		be exploited in Step 6 below. The total relative error	#
+#		in p is no bigger than 2^(-67.7) compared to the final	#
+#		result.							#
+#									#
+#	Step 6.	Reconstruction of exp(X)-1				#
+#			exp(X)-1 = 2^M * ( 2^(J/64) + p - 2^(-M) ).	#
+#		6.1	If M <= 63, go to Step 6.3.			#
+#		6.2	ans := T + (p + (t + OnebySc)). Go to 6.6	#
+#		6.3	If M >= -3, go to 6.5.				#
+#		6.4	ans := (T + (p + t)) + OnebySc. Go to 6.6	#
+#		6.5	ans := (T + OnebySc) + (p + t).			#
+#		6.6	Restore user FPCR.				#
+#		6.7	Return ans := Sc * ans. Exit.			#
+#	Notes:	The various arrangements of the expressions give	#
+#		accurate evaluations.					#
+#									#
+#	Step 7.	exp(X)-1 for |X| < 1/4.					#
+#		7.1	If |X| >= 2^(-65), go to Step 9.		#
+#		7.2	Go to Step 8.					#
+#									#
+#	Step 8.	Calculate exp(X)-1, |X| < 2^(-65).			#
+#		8.1	If |X| < 2^(-16312), goto 8.3			#
+#		8.2	Restore FPCR; return ans := X - 2^(-16382).	#
+#			Exit.						#
+#		8.3	X := X * 2^(140).				#
+#		8.4	Restore FPCR; ans := ans - 2^(-16382).		#
+#		 Return ans := ans*2^(-140). Exit			#
+#	Notes:	The idea is to return "X - tiny" under the user		#
+#		precision and rounding modes. To avoid unnecessary	#
+#		inefficiency, we stay away from denormalized numbers	#
+#		the best we can. For |X| >= 2^(-16312), the		#
+#		straightforward 8.2 generates the inexact exception as	#
+#		the case warrants.					#
+#									#
+#	Step 9.	Calculate exp(X)-1, |X| < 1/4, by a polynomial		#
+#			p = X + X*X*(B1 + X*(B2 + ... + X*B12))		#
+#	Notes:	a) In order to reduce memory access, the coefficients	#
+#		are made as "short" as possible: B1 (which is 1/2), B9	#
+#		to B12 are single precision; B3 to B8 are double	#
+#		precision; and B2 is double extended.			#
+#		b) Even with the restriction above,			#
+#			|p - (exp(X)-1)| < |X| 2^(-70.6)		#
+#		for all |X| <= 0.251.					#
+#		Note that 0.251 is slightly bigger than 1/4.		#
+#		c) To fully preserve accuracy, the polynomial is	#
+#		computed as						#
+#			X + ( S*B1 +	Q ) where S = X*X and		#
+#			Q	=	X*S*(B2 + X*(B3 + ... + X*B12))	#
+#		d) To fully utilize the pipeline, Q is separated into	#
+#		two independent pieces of roughly equal complexity	#
+#			Q = [ X*S*(B2 + S*(B4 + ... + S*B12)) ] +	#
+#				[ S*S*(B3 + S*(B5 + ... + S*B11)) ]	#
+#									#
+#	Step 10. Calculate exp(X)-1 for |X| >= 70 log 2.		#
+#		10.1 If X >= 70 log2, exp(X) - 1 = exp(X) for all	#
+#		practical purposes. Therefore, go to Step 1 of setox.	#
+#		10.2 If X <= -70log2, exp(X) - 1 = -1 for all practical	#
+#		purposes.						#
+#		ans := -1						#
+#		Restore user FPCR					#
+#		Return ans := ans + 2^(-126). Exit.			#
+#	Notes:	10.2 will always create an inexact and return -1 + tiny	#
+#		in the user rounding precision and mode.		#
+#									#
+#########################################################################
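+
+# Worked example of Steps 2-6 of setox (illustrative only): for X = 1.0,
+# N = round(1.0 * 64/log2) = 92, J = 92 mod 64 = 28, M = 1, and
+# R = 1.0 - 92*log2/64 ~ 0.0036009, so
+# exp(1.0) = 2^1 * 2^(28/64) * exp(R) ~ 2 * 1.3542556 * 1.0036074
+#	   ~ 2.7182818.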
+
+L2:	long		0x3FDC0000,0x82E30865,0x4361C4C6,0x00000000
+
+EEXPA3:	long		0x3FA55555,0x55554CC1
+EEXPA2:	long		0x3FC55555,0x55554A54
+
+EM1A4:	long		0x3F811111,0x11174385
+EM1A3:	long		0x3FA55555,0x55554F5A
+
+EM1A2:	long		0x3FC55555,0x55555555,0x00000000,0x00000000
+
+EM1B8:	long		0x3EC71DE3,0xA5774682
+EM1B7:	long		0x3EFA01A0,0x19D7CB68
+
+EM1B6:	long		0x3F2A01A0,0x1A019DF3
+EM1B5:	long		0x3F56C16C,0x16C170E2
+
+EM1B4:	long		0x3F811111,0x11111111
+EM1B3:	long		0x3FA55555,0x55555555
+
+EM1B2:	long		0x3FFC0000,0xAAAAAAAA,0xAAAAAAAB
+	long		0x00000000
+
+TWO140:	long		0x48B00000,0x00000000
+TWON140:
+	long		0x37300000,0x00000000
+
+EEXPTBL:
+	long		0x3FFF0000,0x80000000,0x00000000,0x00000000
+	long		0x3FFF0000,0x8164D1F3,0xBC030774,0x9F841A9B
+	long		0x3FFF0000,0x82CD8698,0xAC2BA1D8,0x9FC1D5B9
+	long		0x3FFF0000,0x843A28C3,0xACDE4048,0xA0728369
+	long		0x3FFF0000,0x85AAC367,0xCC487B14,0x1FC5C95C
+	long		0x3FFF0000,0x871F6196,0x9E8D1010,0x1EE85C9F
+	long		0x3FFF0000,0x88980E80,0x92DA8528,0x9FA20729
+	long		0x3FFF0000,0x8A14D575,0x496EFD9C,0xA07BF9AF
+	long		0x3FFF0000,0x8B95C1E3,0xEA8BD6E8,0xA0020DCF
+	long		0x3FFF0000,0x8D1ADF5B,0x7E5BA9E4,0x205A63DA
+	long		0x3FFF0000,0x8EA4398B,0x45CD53C0,0x1EB70051
+	long		0x3FFF0000,0x9031DC43,0x1466B1DC,0x1F6EB029
+	long		0x3FFF0000,0x91C3D373,0xAB11C338,0xA0781494
+	long		0x3FFF0000,0x935A2B2F,0x13E6E92C,0x9EB319B0
+	long		0x3FFF0000,0x94F4EFA8,0xFEF70960,0x2017457D
+	long		0x3FFF0000,0x96942D37,0x20185A00,0x1F11D537
+	long		0x3FFF0000,0x9837F051,0x8DB8A970,0x9FB952DD
+	long		0x3FFF0000,0x99E04593,0x20B7FA64,0x1FE43087
+	long		0x3FFF0000,0x9B8D39B9,0xD54E5538,0x1FA2A818
+	long		0x3FFF0000,0x9D3ED9A7,0x2CFFB750,0x1FDE494D
+	long		0x3FFF0000,0x9EF53260,0x91A111AC,0x20504890
+	long		0x3FFF0000,0xA0B0510F,0xB9714FC4,0xA073691C
+	long		0x3FFF0000,0xA2704303,0x0C496818,0x1F9B7A05
+	long		0x3FFF0000,0xA43515AE,0x09E680A0,0xA0797126
+	long		0x3FFF0000,0xA5FED6A9,0xB15138EC,0xA071A140
+	long		0x3FFF0000,0xA7CD93B4,0xE9653568,0x204F62DA
+	long		0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x1F283C4A
+	long		0x3FFF0000,0xAB7A39B5,0xA93ED338,0x9F9A7FDC
+	long		0x3FFF0000,0xAD583EEA,0x42A14AC8,0xA05B3FAC
+	long		0x3FFF0000,0xAF3B78AD,0x690A4374,0x1FDF2610
+	long		0x3FFF0000,0xB123F581,0xD2AC2590,0x9F705F90
+	long		0x3FFF0000,0xB311C412,0xA9112488,0x201F678A
+	long		0x3FFF0000,0xB504F333,0xF9DE6484,0x1F32FB13
+	long		0x3FFF0000,0xB6FD91E3,0x28D17790,0x20038B30
+	long		0x3FFF0000,0xB8FBAF47,0x62FB9EE8,0x200DC3CC
+	long		0x3FFF0000,0xBAFF5AB2,0x133E45FC,0x9F8B2AE6
+	long		0x3FFF0000,0xBD08A39F,0x580C36C0,0xA02BBF70
+	long		0x3FFF0000,0xBF1799B6,0x7A731084,0xA00BF518
+	long		0x3FFF0000,0xC12C4CCA,0x66709458,0xA041DD41
+	long		0x3FFF0000,0xC346CCDA,0x24976408,0x9FDF137B
+	long		0x3FFF0000,0xC5672A11,0x5506DADC,0x201F1568
+	long		0x3FFF0000,0xC78D74C8,0xABB9B15C,0x1FC13A2E
+	long		0x3FFF0000,0xC9B9BD86,0x6E2F27A4,0xA03F8F03
+	long		0x3FFF0000,0xCBEC14FE,0xF2727C5C,0x1FF4907D
+	long		0x3FFF0000,0xCE248C15,0x1F8480E4,0x9E6E53E4
+	long		0x3FFF0000,0xD06333DA,0xEF2B2594,0x1FD6D45C
+	long		0x3FFF0000,0xD2A81D91,0xF12AE45C,0xA076EDB9
+	long		0x3FFF0000,0xD4F35AAB,0xCFEDFA20,0x9FA6DE21
+	long		0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x1EE69A2F
+	long		0x3FFF0000,0xD99D15C2,0x78AFD7B4,0x207F439F
+	long		0x3FFF0000,0xDBFBB797,0xDAF23754,0x201EC207
+	long		0x3FFF0000,0xDE60F482,0x5E0E9124,0x9E8BE175
+	long		0x3FFF0000,0xE0CCDEEC,0x2A94E110,0x20032C4B
+	long		0x3FFF0000,0xE33F8972,0xBE8A5A50,0x2004DFF5
+	long		0x3FFF0000,0xE5B906E7,0x7C8348A8,0x1E72F47A
+	long		0x3FFF0000,0xE8396A50,0x3C4BDC68,0x1F722F22
+	long		0x3FFF0000,0xEAC0C6E7,0xDD243930,0xA017E945
+	long		0x3FFF0000,0xED4F301E,0xD9942B84,0x1F401A5B
+	long		0x3FFF0000,0xEFE4B99B,0xDCDAF5CC,0x9FB9A9E3
+	long		0x3FFF0000,0xF281773C,0x59FFB138,0x20744C05
+	long		0x3FFF0000,0xF5257D15,0x2486CC2C,0x1F773A19
+	long		0x3FFF0000,0xF7D0DF73,0x0AD13BB8,0x1FFE90D5
+	long		0x3FFF0000,0xFA83B2DB,0x722A033C,0xA041ED22
+	long		0x3FFF0000,0xFD3E0C0C,0xF486C174,0x1F853F3A
+
+	set		ADJFLAG,L_SCR2
+	set		SCALE,FP_SCR0
+	set		ADJSCALE,FP_SCR1
+	set		SC,FP_SCR0
+	set		ONEBYSC,FP_SCR1
+
+	global		setox
+setox:
+#--entry point for EXP(X), here X is finite, non-zero, and not NaN's
+
+#--Step 1.
+	mov.l		(%a0),%d1		# load part of input X
+	and.l		&0x7FFF0000,%d1		# biased expo. of X
+	cmp.l		%d1,&0x3FBE0000		# 2^(-65)
+	bge.b		EXPC1			# normal case
+	bra		EXPSM
+
+EXPC1:
+#--The case |X| >= 2^(-65)
+	mov.w		4(%a0),%d1		# expo. and partial sig. of |X|
+	cmp.l		%d1,&0x400CB167		# 16380 log2 trunc. 16 bits
+	blt.b		EXPMAIN			# normal case
+	bra		EEXPBIG
+
+EXPMAIN:
+#--Step 2.
+#--This is the normal branch:	2^(-65) <= |X| < 16380 log2.
+	fmov.x		(%a0),%fp0		# load input from (a0)
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	mov.l		&0,ADJFLAG(%a6)
+	fmov.l		%fp0,%d1		# N = int( X * 64/log2 )
+	lea		EEXPTBL(%pc),%a1
+	fmov.l		%d1,%fp0		# convert to floating-format
+
+	mov.l		%d1,L_SCR1(%a6)		# save N temporarily
+	and.l		&0x3F,%d1		# D0 is J = N mod 64
+	lsl.l		&4,%d1
+	add.l		%d1,%a1			# address of 2^(J/64)
+	mov.l		L_SCR1(%a6),%d1
+	asr.l		&6,%d1			# D0 is M
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(M)
+	mov.w		L2(%pc),L_SCR1(%a6)	# prefetch L2, no need in CB
+
+EXPCONT1:
+#--Step 3.
+#--fp2,fp3 saved on the stack. fp0 is N, fp1 is X,
+#--a1 points to 2^(J/64), D0 is biased expo. of 2^(M)
+	fmov.x		%fp0,%fp2
+	fmul.s		&0xBC317218,%fp0	# N * L1, L1 = lead(-log2/64)
+	fmul.x		L2(%pc),%fp2		# N * L2, L1+L2 = -log2/64
+	fadd.x		%fp1,%fp0		# X + N*L1
+	fadd.x		%fp2,%fp0		# fp0 is R, reduced arg.
+
+#--Step 4.
+#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
+#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+#--[R+R*S*(A2+S*A4)] + [S*(A1+S*(A3+S*A5))]
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# fp1 IS S = R*R
+
+	fmov.s		&0x3AB60B70,%fp2	# fp2 IS A5
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*A5
+	fmov.x		%fp1,%fp3
+	fmul.s		&0x3C088895,%fp3	# fp3 IS S*A4
+
+	fadd.d		EEXPA3(%pc),%fp2	# fp2 IS A3+S*A5
+	fadd.d		EEXPA2(%pc),%fp3	# fp3 IS A2+S*A4
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A3+S*A5)
+	mov.w		%d1,SCALE(%a6)		# SCALE is 2^(M) in extended
+	mov.l		&0x80000000,SCALE+4(%a6)
+	clr.l		SCALE+8(%a6)
+
+	fmul.x		%fp1,%fp3		# fp3 IS S*(A2+S*A4)
+
+	fadd.s		&0x3F000000,%fp2	# fp2 IS A1+S*(A3+S*A5)
+	fmul.x		%fp0,%fp3		# fp3 IS R*S*(A2+S*A4)
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A1+S*(A3+S*A5))
+	fadd.x		%fp3,%fp0		# fp0 IS R+R*S*(A2+S*A4),
+
+	fmov.x		(%a1)+,%fp1		# fp1 is lead. pt. of 2^(J/64)
+	fadd.x		%fp2,%fp0		# fp0 is EXP(R) - 1
+
+#--Step 5
+#--final reconstruction process
+#--EXP(X) = 2^M * ( 2^(J/64) + 2^(J/64)*(EXP(R)-1) )
+
+	fmul.x		%fp1,%fp0		# 2^(J/64)*(Exp(R)-1)
+	fmovm.x		(%sp)+,&0x30		# fp2 restored {%fp2/%fp3}
+	fadd.s		(%a1),%fp0		# accurate 2^(J/64)
+
+	fadd.x		%fp1,%fp0		# 2^(J/64) + 2^(J/64)*...
+	mov.l		ADJFLAG(%a6),%d1
+
+#--Step 6
+	tst.l		%d1
+	beq.b		NORMAL
+ADJUST:
+	fmul.x		ADJSCALE(%a6),%fp0
+NORMAL:
+	fmov.l		%d0,%fpcr		# restore user FPCR
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		SCALE(%a6),%fp0		# multiply 2^(M)
+	bra		t_catch
+
+EXPSM:
+#--Step 7
+	fmovm.x		(%a0),&0x80		# load X
+	fmov.l		%d0,%fpcr
+	fadd.s		&0x3F800000,%fp0	# 1+X in user mode
+	bra		t_pinx2
+
+EEXPBIG:
+#--Step 8
+	cmp.l		%d1,&0x400CB27C		# 16480 log2
+	bgt.b		EXP2BIG
+#--Steps 8.2 -- 8.6
+	fmov.x		(%a0),%fp0		# load input from (a0)
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	mov.l		&1,ADJFLAG(%a6)
+	fmov.l		%fp0,%d1		# N = int( X * 64/log2 )
+	lea		EEXPTBL(%pc),%a1
+	fmov.l		%d1,%fp0		# convert to floating-format
+	mov.l		%d1,L_SCR1(%a6)		# save N temporarily
+	and.l		&0x3F,%d1		# D0 is J = N mod 64
+	lsl.l		&4,%d1
+	add.l		%d1,%a1			# address of 2^(J/64)
+	mov.l		L_SCR1(%a6),%d1
+	asr.l		&6,%d1			# D0 is K
+	mov.l		%d1,L_SCR1(%a6)		# save K temporarily
+	asr.l		&1,%d1			# D0 is M1
+	sub.l		%d1,L_SCR1(%a6)		# L_SCR1 is M
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(M1)
+	mov.w		%d1,ADJSCALE(%a6)	# ADJSCALE := 2^(M1)
+	mov.l		&0x80000000,ADJSCALE+4(%a6)
+	clr.l		ADJSCALE+8(%a6)
+	mov.l		L_SCR1(%a6),%d1		# D0 is M
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(M)
+	bra.w		EXPCONT1		# go back to Step 3
+
+EXP2BIG:
+#--Step 9
+	tst.b		(%a0)			# is X positive or negative?
+	bmi		t_unfl2
+	bra		t_ovfl2
+
+	global		setoxd
+setoxd:
+#--entry point for EXP(X), X is denormalized
+	mov.l		(%a0),-(%sp)
+	andi.l		&0x80000000,(%sp)
+	ori.l		&0x00800000,(%sp)	# sign(X)*2^(-126)
+
+	fmov.s		&0x3F800000,%fp0
+
+	fmov.l		%d0,%fpcr
+	fadd.s		(%sp)+,%fp0
+	bra		t_pinx2
+
+	global		setoxm1
+setoxm1:
+#--entry point for EXPM1(X), here X is finite, non-zero, non-NaN
+
+#--Step 1.
+#--Step 1.1
+	mov.l		(%a0),%d1		# load part of input X
+	and.l		&0x7FFF0000,%d1		# biased expo. of X
+	cmp.l		%d1,&0x3FFD0000		# 1/4
+	bge.b		EM1CON1			# |X| >= 1/4
+	bra		EM1SM
+
+EM1CON1:
+#--Step 1.3
+#--The case |X| >= 1/4
+	mov.w		4(%a0),%d1		# expo. and partial sig. of |X|
+	cmp.l		%d1,&0x4004C215		# 70log2 rounded up to 16 bits
+	ble.b		EM1MAIN			# 1/4 <= |X| <= 70log2
+	bra		EM1BIG
+
+EM1MAIN:
+#--Step 2.
+#--This is the case:	1/4 <= |X| <= 70 log2.
+	fmov.x		(%a0),%fp0		# load input from (a0)
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42B8AA3B,%fp0	# 64/log2 * X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	fmov.l		%fp0,%d1		# N = int( X * 64/log2 )
+	lea		EEXPTBL(%pc),%a1
+	fmov.l		%d1,%fp0		# convert to floating-format
+
+	mov.l		%d1,L_SCR1(%a6)		# save N temporarily
+	and.l		&0x3F,%d1		# D0 is J = N mod 64
+	lsl.l		&4,%d1
+	add.l		%d1,%a1			# address of 2^(J/64)
+	mov.l		L_SCR1(%a6),%d1
+	asr.l		&6,%d1			# D0 is M
+	mov.l		%d1,L_SCR1(%a6)		# save a copy of M
+
+#--Step 3.
+#--fp2,fp3 saved on the stack. fp0 is N, fp1 is X,
+#--a1 points to 2^(J/64), D0 and L_SCR1 both contain M
+	fmov.x		%fp0,%fp2
+	fmul.s		&0xBC317218,%fp0	# N * L1, L1 = lead(-log2/64)
+	fmul.x		L2(%pc),%fp2		# N * L2, L1+L2 = -log2/64
+	fadd.x		%fp1,%fp0		# X + N*L1
+	fadd.x		%fp2,%fp0		# fp0 is R, reduced arg.
+	add.w		&0x3FFF,%d1		# D0 is biased expo. of 2^M
+
+#--Step 4.
+#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*(A5 + R*A6)))))
+#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+#--[R*S*(A2+S*(A4+S*A6))] + [R+S*(A1+S*(A3+S*A5))]
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# fp1 IS S = R*R
+
+	fmov.s		&0x3950097B,%fp2	# fp2 IS A6
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*A6
+	fmov.x		%fp1,%fp3
+	fmul.s		&0x3AB60B6A,%fp3	# fp3 IS S*A5
+
+	fadd.d		EM1A4(%pc),%fp2		# fp2 IS A4+S*A6
+	fadd.d		EM1A3(%pc),%fp3		# fp3 IS A3+S*A5
+	mov.w		%d1,SC(%a6)		# SC is 2^(M) in extended
+	mov.l		&0x80000000,SC+4(%a6)
+	clr.l		SC+8(%a6)
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A4+S*A6)
+	mov.l		L_SCR1(%a6),%d1		# D0 is	M
+	neg.w		%d1			# D0 is -M
+	fmul.x		%fp1,%fp3		# fp3 IS S*(A3+S*A5)
+	add.w		&0x3FFF,%d1		# biased expo. of 2^(-M)
+	fadd.d		EM1A2(%pc),%fp2		# fp2 IS A2+S*(A4+S*A6)
+	fadd.s		&0x3F000000,%fp3	# fp3 IS A1+S*(A3+S*A5)
+
+	fmul.x		%fp1,%fp2		# fp2 IS S*(A2+S*(A4+S*A6))
+	or.w		&0x8000,%d1		# signed/expo. of -2^(-M)
+	mov.w		%d1,ONEBYSC(%a6)	# OnebySc is -2^(-M)
+	mov.l		&0x80000000,ONEBYSC+4(%a6)
+	clr.l		ONEBYSC+8(%a6)
+	fmul.x		%fp3,%fp1		# fp1 IS S*(A1+S*(A3+S*A5))
+
+	fmul.x		%fp0,%fp2		# fp2 IS R*S*(A2+S*(A4+S*A6))
+	fadd.x		%fp1,%fp0		# fp0 IS R+S*(A1+S*(A3+S*A5))
+
+	fadd.x		%fp2,%fp0		# fp0 IS EXP(R)-1
+
+	fmovm.x		(%sp)+,&0x30		# fp2 restored {%fp2/%fp3}
+
+#--Step 5
+#--Compute 2^(J/64)*p
+
+	fmul.x		(%a1),%fp0		# 2^(J/64)*(Exp(R)-1)
+
+#--Step 6
+#--Step 6.1
+	mov.l		L_SCR1(%a6),%d1		# retrieve M
+	cmp.l		%d1,&63
+	ble.b		MLE63
+#--Step 6.2	M >= 64
+	fmov.s		12(%a1),%fp1		# fp1 is t
+	fadd.x		ONEBYSC(%a6),%fp1	# fp1 is t+OnebySc
+	fadd.x		%fp1,%fp0		# p+(t+OnebySc), fp1 released
+	fadd.x		(%a1),%fp0		# T+(p+(t+OnebySc))
+	bra		EM1SCALE
+MLE63:
+#--Step 6.3	M <= 63
+	cmp.l		%d1,&-3
+	bge.b		MGEN3
+MLTN3:
+#--Step 6.4	M <= -4
+	fadd.s		12(%a1),%fp0		# p+t
+	fadd.x		(%a1),%fp0		# T+(p+t)
+	fadd.x		ONEBYSC(%a6),%fp0	# OnebySc + (T+(p+t))
+	bra		EM1SCALE
+MGEN3:
+#--Step 6.5	-3 <= M <= 63
+	fmov.x		(%a1)+,%fp1		# fp1 is T
+	fadd.s		(%a1),%fp0		# fp0 is p+t
+	fadd.x		ONEBYSC(%a6),%fp1	# fp1 is T+OnebySc
+	fadd.x		%fp1,%fp0		# (T+OnebySc)+(p+t)
+
+EM1SCALE:
+#--Step 6.6
+	fmov.l		%d0,%fpcr
+	fmul.x		SC(%a6),%fp0
+	bra		t_inx2
+
+EM1SM:
+#--Step 7	|X| < 1/4.
+	cmp.l		%d1,&0x3FBE0000		# 2^(-65)
+	bge.b		EM1POLY
+
+EM1TINY:
+#--Step 8	|X| < 2^(-65)
+	cmp.l		%d1,&0x00330000		# 2^(-16312)
+	blt.b		EM12TINY
+#--Step 8.2
+	mov.l		&0x80010000,SC(%a6)	# SC is -2^(-16382)
+	mov.l		&0x80000000,SC+4(%a6)
+	clr.l		SC+8(%a6)
+	fmov.x		(%a0),%fp0
+	fmov.l		%d0,%fpcr
+	mov.b		&FADD_OP,%d1		# last inst is ADD
+	fadd.x		SC(%a6),%fp0
+	bra		t_catch
+
+EM12TINY:
+#--Step 8.3
+	fmov.x		(%a0),%fp0
+	fmul.d		TWO140(%pc),%fp0
+	mov.l		&0x80010000,SC(%a6)
+	mov.l		&0x80000000,SC+4(%a6)
+	clr.l		SC+8(%a6)
+	fadd.x		SC(%a6),%fp0
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.d		TWON140(%pc),%fp0
+	bra		t_catch
+
+EM1POLY:
+#--Step 9	exp(X)-1 by a simple polynomial
+	fmov.x		(%a0),%fp0		# fp0 is X
+	fmul.x		%fp0,%fp0		# fp0 is S := X*X
+	fmovm.x		&0xc,-(%sp)		# save fp2 {%fp2/%fp3}
+	fmov.s		&0x2F30CAA8,%fp1	# fp1 is B12
+	fmul.x		%fp0,%fp1		# fp1 is S*B12
+	fmov.s		&0x310F8290,%fp2	# fp2 is B11
+	fadd.s		&0x32D73220,%fp1	# fp1 is B10+S*B12
+
+	fmul.x		%fp0,%fp2		# fp2 is S*B11
+	fmul.x		%fp0,%fp1		# fp1 is S*(B10 + ...
+
+	fadd.s		&0x3493F281,%fp2	# fp2 is B9+S*...
+	fadd.d		EM1B8(%pc),%fp1		# fp1 is B8+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B9+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B8+...
+
+	fadd.d		EM1B7(%pc),%fp2		# fp2 is B7+S*...
+	fadd.d		EM1B6(%pc),%fp1		# fp1 is B6+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B7+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B6+...
+
+	fadd.d		EM1B5(%pc),%fp2		# fp2 is B5+S*...
+	fadd.d		EM1B4(%pc),%fp1		# fp1 is B4+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B5+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B4+...
+
+	fadd.d		EM1B3(%pc),%fp2		# fp2 is B3+S*...
+	fadd.x		EM1B2(%pc),%fp1		# fp1 is B2+S*...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*(B3+...
+	fmul.x		%fp0,%fp1		# fp1 is S*(B2+...
+
+	fmul.x		%fp0,%fp2		# fp2 is S*S*(B3+...)
+	fmul.x		(%a0),%fp1		# fp1 is X*S*(B2...
+
+	fmul.s		&0x3F000000,%fp0	# fp0 is S*B1
+	fadd.x		%fp2,%fp1		# fp1 is Q
+
+	fmovm.x		(%sp)+,&0x30		# fp2 restored {%fp2/%fp3}
+
+	fadd.x		%fp1,%fp0		# fp0 is S*B1+Q
+
+	fmov.l		%d0,%fpcr
+	fadd.x		(%a0),%fp0
+	bra		t_inx2
+
+EM1BIG:
+#--Step 10	|X| > 70 log2
+	mov.l		(%a0),%d1
+	cmp.l		%d1,&0
+	bgt.w		EXPC1
+#--Step 10.2
+	fmov.s		&0xBF800000,%fp0	# fp0 is -1
+	fmov.l		%d0,%fpcr
+	fadd.s		&0x00800000,%fp0	# -1 + 2^(-126)
+	bra		t_minx2
+
+	global		setoxm1d
+setoxm1d:
+#--entry point for EXPM1(X), here X is denormalized
+#--Step 0.
+	bra		t_extdnrm
+
+#########################################################################
+# sgetexp():  returns the exponent portion of the input argument.	#
+#	      The exponent bias is removed and the exponent value is	#
+#	      returned as an extended precision number in fp0.		#
+# sgetexpd(): handles denormalized numbers.				#
+#									#
+# sgetman():  extracts the mantissa of the input argument. The		#
+#	      mantissa is converted to an extended precision number w/	#
+#	      an exponent of $3fff and is returned in fp0. The range of #
+#	      the result is [1.0, 2.0).					#
+# sgetmand(): handles denormalized numbers.				#
+#									#
+# INPUT *************************************************************** #
+#	a0  = pointer to extended precision input			#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = exponent(X) or mantissa(X)				#
+#									#
+#########################################################################
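+
+# Example (illustrative): X = -12.0 = -1.5 * 2^3, so sgetexp(-12.0)
+# returns 3.0 and sgetman(-12.0) returns -1.5 (sign preserved,
+# magnitude in [1.0, 2.0)).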
+
+	global		sgetexp
+sgetexp:
+	mov.w		SRC_EX(%a0),%d0		# get the exponent
+	bclr		&0xf,%d0		# clear the sign bit
+	subi.w		&0x3fff,%d0		# subtract off the bias
+	fmov.w		%d0,%fp0		# return exp in fp0
+	blt.b		sgetexpn		# it's negative
+	rts
+
+sgetexpn:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+	global		sgetexpd
+sgetexpd:
+	bsr.l		norm			# normalize
+	neg.w		%d0			# new exp = -(shft amt)
+	subi.w		&0x3fff,%d0		# subtract off the bias
+	fmov.w		%d0,%fp0		# return exp in fp0
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+	global		sgetman
+sgetman:
+	mov.w		SRC_EX(%a0),%d0		# get the exp
+	ori.w		&0x7fff,%d0		# clear old exp
+	bclr		&0xe,%d0		# make it the new exp +-3fff
+
+# here, we build the result in a tmp location so as not to disturb the input
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6) # copy to tmp loc
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6) # copy to tmp loc
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmov.x		FP_SCR0(%a6),%fp0	# put new value back in fp0
+	bmi.b		sgetmann		# it's negative
+	rts
+
+sgetmann:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#
+# For denormalized numbers, shift the mantissa until the j-bit = 1,
+# then load the exponent with +/- $3fff.
+#
+	global		sgetmand
+sgetmand:
+	bsr.l		norm			# normalize exponent
+	bra.b		sgetman
+
+#########################################################################
+# scosh():  computes the hyperbolic cosine of a normalized input	#
+# scoshd(): computes the hyperbolic cosine of a denormalized input	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = cosh(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	COSH								#
+#	1. If |X| > 16380 log2, go to 3.				#
+#									#
+#	2. (|X| <= 16380 log2) Cosh(X) is obtained by the formula	#
+#		y = |X|, z = exp(Y), and				#
+#		cosh(X) = (1/2)*( z + 1/z ).				#
+#		Exit.							#
+#									#
+#	3. (|X| > 16380 log2). If |X| > 16480 log2, go to 5.		#
+#									#
+#	4. (16380 log2 < |X| <= 16480 log2)				#
+#		cosh(X) = exp(|X|)/2.					#
+#		However, invoking exp(|X|) may cause premature		#
+#		overflow. Thus, we calculate cosh(X) as follows:	#
+#		Y	:= |X|						#
+#		Fact	:=	2**(16380)				#
+#		Y'	:= Y - 16381 log2				#
+#		cosh(X) := Fact * exp(Y').				#
+#		Exit.							#
+#									#
+#	5. (|X| > 16480 log2) cosh(X) must overflow. Return		#
+#		Huge*Huge to generate overflow and an infinity with	#
+#		the appropriate sign. Huge is the largest finite number	#
+#		in extended format. Exit.				#
+#									#
+#########################################################################
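+#
+# A C sketch of the three COSH cases, using double only to make the
+# control flow concrete (the 16380/16480 log2 thresholds belong to the
+# extended format and would overflow a double; my_cosh is a made-up
+# name, not part of this package):
+#
+#	#include <math.h>
+#
+#	double my_cosh(double x)
+#	{
+#		double y = fabs(x);
+#		if (y <= 16380.0 * M_LN2) {	/* usual case */
+#			double z = 0.5 * exp(y);
+#			return z + 0.25 / z;	/* (1/2)(e^y + 1/e^y) */
+#		}
+#		if (y <= 16480.0 * M_LN2)	/* avoid premature overflow */
+#			return ldexp(exp(y - 16381.0 * M_LN2), 16380);
+#		return HUGE_VAL * HUGE_VAL;	/* force overflow */
+#	}
+#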
+
+TWO16380:
+	long		0x7FFB0000,0x80000000,0x00000000,0x00000000
+
+	global		scosh
+scosh:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x400CB167
+	bgt.b		COSHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+#--COSH(X) = (1/2) * ( EXP(X) + 1/EXP(X) )
+
+	fabs.x		%fp0			# |X|
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save |X| to stack
+	lea		(%sp),%a0		# pass ptr to |X|
+	bsr		setox			# FP0 IS EXP(|X|)
+	add.l		&0xc,%sp		# erase |X| from stack
+	fmul.s		&0x3F000000,%fp0	# (1/2)EXP(|X|)
+	mov.l		(%sp)+,%d0
+
+	fmov.s		&0x3E800000,%fp1	# (1/4)
+	fdiv.x		%fp0,%fp1		# 1/(2 EXP(|X|))
+
+	fmov.l		%d0,%fpcr
+	mov.b		&FADD_OP,%d1		# last inst is ADD
+	fadd.x		%fp1,%fp0
+	bra		t_catch
+
+COSHBIG:
+	cmp.l		%d1,&0x400CB2B3
+	bgt.b		COSHHUGE
+
+	fabs.x		%fp0
+	fsub.d		T1(%pc),%fp0		# (|X|-16381LOG2_LEAD)
+	fsub.d		T2(%pc),%fp0		# |X| - 16381 LOG2, ACCURATE
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save fp0 to stack
+	lea		(%sp),%a0		# pass ptr to fp0
+	bsr		setox
+	add.l		&0xc,%sp		# clear fp0 from stack
+	mov.l		(%sp)+,%d0
+
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		TWO16380(%pc),%fp0
+	bra		t_catch
+
+COSHHUGE:
+	bra		t_ovfl2
+
+	global		scoshd
+#--COSH(X) = 1 FOR DENORMALIZED X
+scoshd:
+	fmov.s		&0x3F800000,%fp0
+
+	fmov.l		%d0,%fpcr
+	fadd.s		&0x00800000,%fp0
+	bra		t_pinx2
+
+#########################################################################
+# ssinh():  computes the hyperbolic sine of a normalized input		#
+# ssinhd(): computes the hyperbolic sine of a denormalized input	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = sinh(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#       SINH								#
+#       1. If |X| > 16380 log2, go to 3.				#
+#									#
+#       2. (|X| <= 16380 log2) Sinh(X) is obtained by the formula	#
+#               y = |X|, sgn = sign(X), and z = expm1(Y),		#
+#               sinh(X) = sgn*(1/2)*( z + z/(1+z) ).			#
+#          Exit.							#
+#									#
+#       3. If |X| > 16480 log2, go to 5.				#
+#									#
+#       4. (16380 log2 < |X| <= 16480 log2)				#
+#               sinh(X) = sign(X) * exp(|X|)/2.				#
+#          However, invoking exp(|X|) may cause premature overflow.	#
+#          Thus, we calculate sinh(X) as follows:			#
+#             Y       := |X|						#
+#             sgn     := sign(X)					#
+#             sgnFact := sgn * 2**(16380)				#
+#             Y'      := Y - 16381 log2					#
+#             sinh(X) := sgnFact * exp(Y').				#
+#          Exit.							#
+#									#
+#       5. (|X| > 16480 log2) sinh(X) must overflow. Return		#
+#          sign(X)*Huge*Huge to generate overflow and an infinity with	#
+#          the appropriate sign. Huge is the largest finite number in	#
+#          extended format. Exit.					#
+#									#
+#########################################################################
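+#
+# The matching C sketch for SINH (same caveats as the scosh sketch
+# above: double stands in for extended precision, my_sinh is a made-up
+# name):
+#
+#	#include <math.h>
+#
+#	double my_sinh(double x)
+#	{
+#		double y = fabs(x), sgn = copysign(1.0, x);
+#		if (y <= 16380.0 * M_LN2) {	/* usual case */
+#			double z = expm1(y);
+#			return sgn * 0.5 * (z + z / (1.0 + z));
+#		}
+#		if (y <= 16480.0 * M_LN2)	/* scale to dodge overflow */
+#			return sgn * ldexp(exp(y - 16381.0 * M_LN2), 16380);
+#		return sgn * (HUGE_VAL * HUGE_VAL);	/* overflow */
+#	}
+#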
+
+	global		ssinh
+ssinh:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	mov.l		%d1,%a1			# save (compacted) operand
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x400CB167
+	bgt.b		SINHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+#--Y = |X|, Z = EXPM1(Y), SINH(X) = SIGN(X)*(1/2)*( Z + Z/(1+Z) )
+
+	fabs.x		%fp0			# Y = |X|
+
+	movm.l		&0x8040,-(%sp)		# {a1/d0}
+	fmovm.x		&0x01,-(%sp)		# save Y on stack
+	lea		(%sp),%a0		# pass ptr to Y
+	clr.l		%d0
+	bsr		setoxm1			# FP0 IS Z = EXPM1(Y)
+	add.l		&0xc,%sp		# clear Y from stack
+	fmov.l		&0,%fpcr
+	movm.l		(%sp)+,&0x0201		# {a1/d0}
+
+	fmov.x		%fp0,%fp1
+	fadd.s		&0x3F800000,%fp1	# 1+Z
+	fmov.x		%fp0,-(%sp)
+	fdiv.x		%fp1,%fp0		# Z/(1+Z)
+	mov.l		%a1,%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x3F000000,%d1
+	fadd.x		(%sp)+,%fp0
+	mov.l		%d1,-(%sp)
+
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.s		(%sp)+,%fp0		# last fp inst - possible exceptions set
+	bra		t_catch
+
+SINHBIG:
+	cmp.l		%d1,&0x400CB2B3
+	bgt		t_ovfl
+	fabs.x		%fp0
+	fsub.d		T1(%pc),%fp0		# (|X|-16381LOG2_LEAD)
+	mov.l		&0,-(%sp)
+	mov.l		&0x80000000,-(%sp)
+	mov.l		%a1,%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x7FFB0000,%d1
+	mov.l		%d1,-(%sp)		# EXTENDED FMT
+	fsub.d		T2(%pc),%fp0		# |X| - 16381 LOG2, ACCURATE
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save fp0 on stack
+	lea		(%sp),%a0		# pass ptr to fp0
+	bsr		setox
+	add.l		&0xc,%sp		# clear fp0 from stack
+
+	mov.l		(%sp)+,%d0
+	fmov.l		%d0,%fpcr
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		(%sp)+,%fp0		# possible exception
+	bra		t_catch
+
+	global		ssinhd
+#--SINH(X) = X FOR DENORMALIZED X
+ssinhd:
+	bra		t_extdnrm
+
+#########################################################################
+# stanh():  computes the hyperbolic tangent of a normalized input	#
+# stanhd(): computes the hyperbolic tangent of a denormalized input	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = tanh(X)							#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	TANH								#
+#	1. If |X| >= (5/2) log2 or |X| <= 2**(-40), go to 3.		#
+#									#
+#	2. (2**(-40) < |X| < (5/2) log2) Calculate tanh(X) by		#
+#		sgn := sign(X), y := 2|X|, z := expm1(Y), and		#
+#		tanh(X) = sgn*( z/(2+z) ).				#
+#		Exit.							#
+#									#
+#	3. (|X| <= 2**(-40) or |X| >= (5/2) log2). If |X| < 1,		#
+#		go to 7.						#
+#									#
+#	4. (|X| >= (5/2) log2) If |X| >= 50 log2, go to 6.		#
+#									#
+#	5. ((5/2) log2 <= |X| < 50 log2) Calculate tanh(X) by		#
+#		sgn := sign(X), y := 2|X|, z := exp(Y),			#
+#		tanh(X) = sgn - [ sgn*2/(1+z) ].			#
+#		Exit.							#
+#									#
+#	6. (|X| >= 50 log2) Tanh(X) = +-1 (round to nearest). Thus, we	#
+#		calculate Tanh(X) by					#
+#		sgn := sign(X), Tiny := 2**(-126),			#
+#		tanh(X) := sgn - sgn*Tiny.				#
+#		Exit.							#
+#									#
+#	7. (|X| < 2**(-40)). Tanh(X) = X.	Exit.			#
+#									#
+#########################################################################
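+#
+# A C sketch of the TANH cases above (double in place of extended;
+# my_tanh is a made-up name):
+#
+#	#include <math.h>
+#
+#	double my_tanh(double x)
+#	{
+#		double y = fabs(x), sgn = copysign(1.0, x);
+#		if (y <= 0x1p-40)		/* step 7: tanh(x) ~ x */
+#			return x;
+#		if (y < 2.5 * M_LN2) {		/* step 2: usual case */
+#			double z = expm1(2.0 * y);
+#			return sgn * (z / (2.0 + z));
+#		}
+#		if (y < 50.0 * M_LN2)		/* step 5 */
+#			return sgn - sgn * 2.0 / (1.0 + exp(2.0 * y));
+#		return sgn - sgn * 0x1p-126;	/* step 6: +-1, inexact */
+#	}
+#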
+
+	set		X,FP_SCR0
+	set		XFRAC,X+4
+
+	set		SGN,L_SCR3
+
+	set		V,FP_SCR0
+
+	global		stanh
+stanh:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+
+	fmov.x		%fp0,X(%a6)
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	mov.l		%d1,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1, &0x3fd78000	# is |X| < 2^(-40)?
+	blt.w		TANHBORS		# yes
+	cmp.l		%d1, &0x3fffddce	# is |X| > (5/2)LOG2?
+	bgt.w		TANHBORS		# yes
+
+#--THIS IS THE USUAL CASE
+#--Y = 2|X|, Z = EXPM1(Y), TANH(X) = SIGN(X) * Z / (Z+2).
+
+	mov.l		X(%a6),%d1
+	mov.l		%d1,SGN(%a6)
+	and.l		&0x7FFF0000,%d1
+	add.l		&0x00010000,%d1		# EXPONENT OF 2|X|
+	mov.l		%d1,X(%a6)
+	and.l		&0x80000000,SGN(%a6)
+	fmov.x		X(%a6),%fp0		# FP0 IS Y = 2|X|
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x1,-(%sp)		# save Y on stack
+	lea		(%sp),%a0		# pass ptr to Y
+	bsr		setoxm1			# FP0 IS Z = EXPM1(Y)
+	add.l		&0xc,%sp		# clear Y from stack
+	mov.l		(%sp)+,%d0
+
+	fmov.x		%fp0,%fp1
+	fadd.s		&0x40000000,%fp1	# Z+2
+	mov.l		SGN(%a6),%d1
+	fmov.x		%fp1,V(%a6)
+	eor.l		%d1,V(%a6)
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	fdiv.x		V(%a6),%fp0
+	bra		t_inx2
+
+TANHBORS:
+	cmp.l		%d1,&0x3FFF8000
+	blt.w		TANHSM
+
+	cmp.l		%d1,&0x40048AA1
+	bgt.w		TANHHUGE
+
+#-- (5/2) LOG2 < |X| < 50 LOG2,
+#--TANH(X) = 1 - (2/[EXP(2X)+1]). LET Y = 2|X|, SGN = SIGN(X),
+#--TANH(X) = SGN -	SGN*2/[EXP(Y)+1].
+
+	mov.l		X(%a6),%d1
+	mov.l		%d1,SGN(%a6)
+	and.l		&0x7FFF0000,%d1
+	add.l		&0x00010000,%d1		# EXPO OF 2|X|
+	mov.l		%d1,X(%a6)		# Y = 2|X|
+	and.l		&0x80000000,SGN(%a6)
+	mov.l		SGN(%a6),%d1
+	fmov.x		X(%a6),%fp0		# Y = 2|X|
+
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	fmovm.x		&0x01,-(%sp)		# save Y on stack
+	lea		(%sp),%a0		# pass ptr to Y
+	bsr		setox			# FP0 IS EXP(Y)
+	add.l		&0xc,%sp		# clear Y from stack
+	mov.l		(%sp)+,%d0
+	mov.l		SGN(%a6),%d1
+	fadd.s		&0x3F800000,%fp0	# EXP(Y)+1
+
+	eor.l		&0xC0000000,%d1		# -SIGN(X)*2
+	fmov.s		%d1,%fp1		# -SIGN(X)*2 IN SGL FMT
+	fdiv.x		%fp0,%fp1		# -SIGN(X)*2 / [EXP(Y)+1]
+
+	mov.l		SGN(%a6),%d1
+	or.l		&0x3F800000,%d1		# SGN
+	fmov.s		%d1,%fp0		# SGN IN SGL FMT
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	mov.b		&FADD_OP,%d1		# last inst is ADD
+	fadd.x		%fp1,%fp0
+	bra		t_inx2
+
+TANHSM:
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		X(%a6),%fp0		# last inst - possible exception set
+	bra		t_catch
+
+#---RETURN SGN(X) - SGN(X)EPS
+TANHHUGE:
+	mov.l		X(%a6),%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x3F800000,%d1
+	fmov.s		%d1,%fp0
+	and.l		&0x80000000,%d1
+	eor.l		&0x80800000,%d1		# -SIGN(X)*EPS
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	fadd.s		%d1,%fp0
+	bra		t_inx2
+
+	global		stanhd
+#--TANH(X) = X FOR DENORMALIZED X
+stanhd:
+	bra		t_extdnrm
+
+#########################################################################
+# slogn():    computes the natural logarithm of a normalized input	#
+# slognd():   computes the natural logarithm of a denormalized input	#
+# slognp1():  computes the log(1+X) of a normalized input		#
+# slognp1d(): computes the log(1+X) of a denormalized input		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = log(X) or log(1+X)					#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 2 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	LOGN:								#
+#	Step 1. If |X-1| < 1/16, approximate log(X) by an odd		#
+#		polynomial in u, where u = 2(X-1)/(X+1). Otherwise,	#
+#		move on to Step 2.					#
+#									#
+#	Step 2. X = 2**k * Y where 1 <= Y < 2. Define F to be the first	#
+#		seven significant bits of Y plus 2**(-7), i.e.		#
+#		F = 1.xxxxxx1 in base 2 where the six "x" match those	#
+#		of Y. Note that |Y-F| <= 2**(-7).			#
+#									#
+#	Step 3. Define u = (Y-F)/F. Approximate log(1+u) by a		#
+#		polynomial in u, log(1+u) = poly.			#
+#									#
+#	Step 4. Reconstruct						#
+#		log(X) = log( 2**k * Y ) = k*log(2) + log(F) + log(1+u)	#
+#		by k*log(2) + (log(F) + poly). The values of log(F) are	#
+#		calculated beforehand and stored in the program.	#
+#									#
+#	lognp1:								#
+#	Step 1: If |X| < 1/16, approximate log(1+X) by an odd		#
+#		polynomial in u where u = 2X/(2+X). Otherwise, move on	#
+#		to Step 2.						#
+#									#
+#	Step 2: Let 1+X = 2**k * Y, where 1 <= Y < 2. Define F as done	#
+#		in Step 2 of the algorithm for LOGN and compute		#
+#		log(1+X) as k*log(2) + log(F) + poly where poly		#
+#		approximates log(1+u), u = (Y-F)/F.			#
+#									#
+#	Implementation Notes:						#
+#	Note 1. There are 64 different possible values for F, thus 64	#
+#		log(F)'s need to be tabulated. Moreover, the values of	#
+#		1/F are also tabulated so that the division in (Y-F)/F	#
+#		can be performed by a multiplication.			#
+#									#
+#	Note 2. In Step 2 of lognp1, in order to preserve accuracy,	#
+#		the value Y-F has to be calculated carefully when	#
+#		1/2 <= X < 3/2.						#
+#									#
+#	Note 3. To fully exploit the pipeline, polynomials are usually	#
+#		separated into two parts evaluated independently before	#
+#		being added up.						#
+#									#
+#########################################################################
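+#
+# A C sketch of the table-driven LOGN scheme, where inv_f[] and log_f[]
+# stand for the 64 tabulated (1/F, log(F)) pairs in LOGTBL below, and
+# poly()/odd_poly() for the LOGA*/LOGB* polynomial approximations (all
+# of these names are illustrative only):
+#
+#	#include <math.h>
+#
+#	double my_log(double x)
+#	{
+#		if (fabs(x - 1.0) < 1.0 / 16.0) {	/* Step 1: X near 1 */
+#			double u = 2.0 * (x - 1.0) / (x + 1.0);
+#			return u + u*u*u * odd_poly(u*u);
+#		}
+#		int k;
+#		double y = 2.0 * frexp(x, &k);	/* x = 2^(k-1)*y, 1 <= y < 2 */
+#		k -= 1;
+#		int j = (int)((y - 1.0) * 64.0);	/* top 6 fraction bits */
+#		double f = 1.0 + j / 64.0 + 1.0 / 128.0; /* F = 1.xxxxxx1 */
+#		double u = (y - f) * inv_f[j];	/* (Y-F)/F by multiplication */
+#		return k * M_LN2 + (log_f[j] + (u + u*u * poly(u)));
+#	}
+#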
+LOGOF2:
+	long		0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+one:
+	long		0x3F800000
+zero:
+	long		0x00000000
+infty:
+	long		0x7F800000
+negone:
+	long		0xBF800000
+
+LOGA6:
+	long		0x3FC2499A,0xB5E4040B
+LOGA5:
+	long		0xBFC555B5,0x848CB7DB
+
+LOGA4:
+	long		0x3FC99999,0x987D8730
+LOGA3:
+	long		0xBFCFFFFF,0xFF6F7E97
+
+LOGA2:
+	long		0x3FD55555,0x555555A4
+LOGA1:
+	long		0xBFE00000,0x00000008
+
+LOGB5:
+	long		0x3F175496,0xADD7DAD6
+LOGB4:
+	long		0x3F3C71C2,0xFE80C7E0
+
+LOGB3:
+	long		0x3F624924,0x928BCCFF
+LOGB2:
+	long		0x3F899999,0x999995EC
+
+LOGB1:
+	long		0x3FB55555,0x55555555
+TWO:
+	long		0x40000000,0x00000000
+
+LTHOLD:
+	long		0x3f990000,0x80000000,0x00000000,0x00000000
+
+LOGTBL:
+	long		0x3FFE0000,0xFE03F80F,0xE03F80FE,0x00000000
+	long		0x3FF70000,0xFF015358,0x833C47E2,0x00000000
+	long		0x3FFE0000,0xFA232CF2,0x52138AC0,0x00000000
+	long		0x3FF90000,0xBDC8D83E,0xAD88D549,0x00000000
+	long		0x3FFE0000,0xF6603D98,0x0F6603DA,0x00000000
+	long		0x3FFA0000,0x9CF43DCF,0xF5EAFD48,0x00000000
+	long		0x3FFE0000,0xF2B9D648,0x0F2B9D65,0x00000000
+	long		0x3FFA0000,0xDA16EB88,0xCB8DF614,0x00000000
+	long		0x3FFE0000,0xEF2EB71F,0xC4345238,0x00000000
+	long		0x3FFB0000,0x8B29B775,0x1BD70743,0x00000000
+	long		0x3FFE0000,0xEBBDB2A5,0xC1619C8C,0x00000000
+	long		0x3FFB0000,0xA8D839F8,0x30C1FB49,0x00000000
+	long		0x3FFE0000,0xE865AC7B,0x7603A197,0x00000000
+	long		0x3FFB0000,0xC61A2EB1,0x8CD907AD,0x00000000
+	long		0x3FFE0000,0xE525982A,0xF70C880E,0x00000000
+	long		0x3FFB0000,0xE2F2A47A,0xDE3A18AF,0x00000000
+	long		0x3FFE0000,0xE1FC780E,0x1FC780E2,0x00000000
+	long		0x3FFB0000,0xFF64898E,0xDF55D551,0x00000000
+	long		0x3FFE0000,0xDEE95C4C,0xA037BA57,0x00000000
+	long		0x3FFC0000,0x8DB956A9,0x7B3D0148,0x00000000
+	long		0x3FFE0000,0xDBEB61EE,0xD19C5958,0x00000000
+	long		0x3FFC0000,0x9B8FE100,0xF47BA1DE,0x00000000
+	long		0x3FFE0000,0xD901B203,0x6406C80E,0x00000000
+	long		0x3FFC0000,0xA9372F1D,0x0DA1BD17,0x00000000
+	long		0x3FFE0000,0xD62B80D6,0x2B80D62C,0x00000000
+	long		0x3FFC0000,0xB6B07F38,0xCE90E46B,0x00000000
+	long		0x3FFE0000,0xD3680D36,0x80D3680D,0x00000000
+	long		0x3FFC0000,0xC3FD0329,0x06488481,0x00000000
+	long		0x3FFE0000,0xD0B69FCB,0xD2580D0B,0x00000000
+	long		0x3FFC0000,0xD11DE0FF,0x15AB18CA,0x00000000
+	long		0x3FFE0000,0xCE168A77,0x25080CE1,0x00000000
+	long		0x3FFC0000,0xDE1433A1,0x6C66B150,0x00000000
+	long		0x3FFE0000,0xCB8727C0,0x65C393E0,0x00000000
+	long		0x3FFC0000,0xEAE10B5A,0x7DDC8ADD,0x00000000
+	long		0x3FFE0000,0xC907DA4E,0x871146AD,0x00000000
+	long		0x3FFC0000,0xF7856E5E,0xE2C9B291,0x00000000
+	long		0x3FFE0000,0xC6980C69,0x80C6980C,0x00000000
+	long		0x3FFD0000,0x82012CA5,0xA68206D7,0x00000000
+	long		0x3FFE0000,0xC4372F85,0x5D824CA6,0x00000000
+	long		0x3FFD0000,0x882C5FCD,0x7256A8C5,0x00000000
+	long		0x3FFE0000,0xC1E4BBD5,0x95F6E947,0x00000000
+	long		0x3FFD0000,0x8E44C60B,0x4CCFD7DE,0x00000000
+	long		0x3FFE0000,0xBFA02FE8,0x0BFA02FF,0x00000000
+	long		0x3FFD0000,0x944AD09E,0xF4351AF6,0x00000000
+	long		0x3FFE0000,0xBD691047,0x07661AA3,0x00000000
+	long		0x3FFD0000,0x9A3EECD4,0xC3EAA6B2,0x00000000
+	long		0x3FFE0000,0xBB3EE721,0xA54D880C,0x00000000
+	long		0x3FFD0000,0xA0218434,0x353F1DE8,0x00000000
+	long		0x3FFE0000,0xB92143FA,0x36F5E02E,0x00000000
+	long		0x3FFD0000,0xA5F2FCAB,0xBBC506DA,0x00000000
+	long		0x3FFE0000,0xB70FBB5A,0x19BE3659,0x00000000
+	long		0x3FFD0000,0xABB3B8BA,0x2AD362A5,0x00000000
+	long		0x3FFE0000,0xB509E68A,0x9B94821F,0x00000000
+	long		0x3FFD0000,0xB1641795,0xCE3CA97B,0x00000000
+	long		0x3FFE0000,0xB30F6352,0x8917C80B,0x00000000
+	long		0x3FFD0000,0xB7047551,0x5D0F1C61,0x00000000
+	long		0x3FFE0000,0xB11FD3B8,0x0B11FD3C,0x00000000
+	long		0x3FFD0000,0xBC952AFE,0xEA3D13E1,0x00000000
+	long		0x3FFE0000,0xAF3ADDC6,0x80AF3ADE,0x00000000
+	long		0x3FFD0000,0xC2168ED0,0xF458BA4A,0x00000000
+	long		0x3FFE0000,0xAD602B58,0x0AD602B6,0x00000000
+	long		0x3FFD0000,0xC788F439,0xB3163BF1,0x00000000
+	long		0x3FFE0000,0xAB8F69E2,0x8359CD11,0x00000000
+	long		0x3FFD0000,0xCCECAC08,0xBF04565D,0x00000000
+	long		0x3FFE0000,0xA9C84A47,0xA07F5638,0x00000000
+	long		0x3FFD0000,0xD2420487,0x2DD85160,0x00000000
+	long		0x3FFE0000,0xA80A80A8,0x0A80A80B,0x00000000
+	long		0x3FFD0000,0xD7894992,0x3BC3588A,0x00000000
+	long		0x3FFE0000,0xA655C439,0x2D7B73A8,0x00000000
+	long		0x3FFD0000,0xDCC2C4B4,0x9887DACC,0x00000000
+	long		0x3FFE0000,0xA4A9CF1D,0x96833751,0x00000000
+	long		0x3FFD0000,0xE1EEBD3E,0x6D6A6B9E,0x00000000
+	long		0x3FFE0000,0xA3065E3F,0xAE7CD0E0,0x00000000
+	long		0x3FFD0000,0xE70D785C,0x2F9F5BDC,0x00000000
+	long		0x3FFE0000,0xA16B312E,0xA8FC377D,0x00000000
+	long		0x3FFD0000,0xEC1F392C,0x5179F283,0x00000000
+	long		0x3FFE0000,0x9FD809FD,0x809FD80A,0x00000000
+	long		0x3FFD0000,0xF12440D3,0xE36130E6,0x00000000
+	long		0x3FFE0000,0x9E4CAD23,0xDD5F3A20,0x00000000
+	long		0x3FFD0000,0xF61CCE92,0x346600BB,0x00000000
+	long		0x3FFE0000,0x9CC8E160,0xC3FB19B9,0x00000000
+	long		0x3FFD0000,0xFB091FD3,0x8145630A,0x00000000
+	long		0x3FFE0000,0x9B4C6F9E,0xF03A3CAA,0x00000000
+	long		0x3FFD0000,0xFFE97042,0xBFA4C2AD,0x00000000
+	long		0x3FFE0000,0x99D722DA,0xBDE58F06,0x00000000
+	long		0x3FFE0000,0x825EFCED,0x49369330,0x00000000
+	long		0x3FFE0000,0x9868C809,0x868C8098,0x00000000
+	long		0x3FFE0000,0x84C37A7A,0xB9A905C9,0x00000000
+	long		0x3FFE0000,0x97012E02,0x5C04B809,0x00000000
+	long		0x3FFE0000,0x87224C2E,0x8E645FB7,0x00000000
+	long		0x3FFE0000,0x95A02568,0x095A0257,0x00000000
+	long		0x3FFE0000,0x897B8CAC,0x9F7DE298,0x00000000
+	long		0x3FFE0000,0x94458094,0x45809446,0x00000000
+	long		0x3FFE0000,0x8BCF55DE,0xC4CD05FE,0x00000000
+	long		0x3FFE0000,0x92F11384,0x0497889C,0x00000000
+	long		0x3FFE0000,0x8E1DC0FB,0x89E125E5,0x00000000
+	long		0x3FFE0000,0x91A2B3C4,0xD5E6F809,0x00000000
+	long		0x3FFE0000,0x9066E68C,0x955B6C9B,0x00000000
+	long		0x3FFE0000,0x905A3863,0x3E06C43B,0x00000000
+	long		0x3FFE0000,0x92AADE74,0xC7BE59E0,0x00000000
+	long		0x3FFE0000,0x8F1779D9,0xFDC3A219,0x00000000
+	long		0x3FFE0000,0x94E9BFF6,0x15845643,0x00000000
+	long		0x3FFE0000,0x8DDA5202,0x37694809,0x00000000
+	long		0x3FFE0000,0x9723A1B7,0x20134203,0x00000000
+	long		0x3FFE0000,0x8CA29C04,0x6514E023,0x00000000
+	long		0x3FFE0000,0x995899C8,0x90EB8990,0x00000000
+	long		0x3FFE0000,0x8B70344A,0x139BC75A,0x00000000
+	long		0x3FFE0000,0x9B88BDAA,0x3A3DAE2F,0x00000000
+	long		0x3FFE0000,0x8A42F870,0x5669DB46,0x00000000
+	long		0x3FFE0000,0x9DB4224F,0xFFE1157C,0x00000000
+	long		0x3FFE0000,0x891AC73A,0xE9819B50,0x00000000
+	long		0x3FFE0000,0x9FDADC26,0x8B7A12DA,0x00000000
+	long		0x3FFE0000,0x87F78087,0xF78087F8,0x00000000
+	long		0x3FFE0000,0xA1FCFF17,0xCE733BD4,0x00000000
+	long		0x3FFE0000,0x86D90544,0x7A34ACC6,0x00000000
+	long		0x3FFE0000,0xA41A9E8F,0x5446FB9F,0x00000000
+	long		0x3FFE0000,0x85BF3761,0x2CEE3C9B,0x00000000
+	long		0x3FFE0000,0xA633CD7E,0x6771CD8B,0x00000000
+	long		0x3FFE0000,0x84A9F9C8,0x084A9F9D,0x00000000
+	long		0x3FFE0000,0xA8489E60,0x0B435A5E,0x00000000
+	long		0x3FFE0000,0x83993052,0x3FBE3368,0x00000000
+	long		0x3FFE0000,0xAA59233C,0xCCA4BD49,0x00000000
+	long		0x3FFE0000,0x828CBFBE,0xB9A020A3,0x00000000
+	long		0x3FFE0000,0xAC656DAE,0x6BCC4985,0x00000000
+	long		0x3FFE0000,0x81848DA8,0xFAF0D277,0x00000000
+	long		0x3FFE0000,0xAE6D8EE3,0x60BB2468,0x00000000
+	long		0x3FFE0000,0x80808080,0x80808081,0x00000000
+	long		0x3FFE0000,0xB07197A2,0x3C46C654,0x00000000
+
+	set		ADJK,L_SCR1
+
+	set		X,FP_SCR0
+	set		XDCARE,X+2
+	set		XFRAC,X+4
+
+	set		F,FP_SCR1
+	set		FFRAC,F+4
+
+	set		KLOG2,FP_SCR0
+
+	set		SAVEU,FP_SCR0
+
+	global		slogn
+#--ENTRY POINT FOR LOG(X) FOR X FINITE, NON-ZERO, NOT NAN'S
+slogn:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	mov.l		&0x00000000,ADJK(%a6)
+
+LOGBGN:
+#--FPCR SAVED AND CLEARED, INPUT IS 2^(ADJK)*FP0, FP0 CONTAINS
+#--A FINITE, NON-ZERO, NORMALIZED NUMBER.
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+
+	mov.l		(%a0),X(%a6)
+	mov.l		4(%a0),X+4(%a6)
+	mov.l		8(%a0),X+8(%a6)
+
+	cmp.l		%d1,&0			# CHECK IF X IS NEGATIVE
+	blt.w		LOGNEG			# LOG OF NEGATIVE ARGUMENT IS INVALID
+# X IS POSITIVE, CHECK IF X IS NEAR 1
+	cmp.l		%d1,&0x3ffef07d		# IS X < 15/16?
+	blt.b		LOGMAIN			# YES
+	cmp.l		%d1,&0x3fff8841		# IS X > 17/16?
+	ble.w		LOGNEAR1		# NO
+
+LOGMAIN:
+#--THIS SHOULD BE THE USUAL CASE, X NOT VERY CLOSE TO 1
+
+#--X = 2^(K) * Y, 1 <= Y < 2. THUS, Y = 1.XXXXXXXX....XX IN BINARY.
+#--WE DEFINE F = 1.XXXXXX1, I.E. FIRST 7 BITS OF Y AND ATTACH A 1.
+#--THE IDEA IS THAT LOG(X) = K*LOG2 + LOG(Y)
+#--			 = K*LOG2 + LOG(F) + LOG(1 + (Y-F)/F).
+#--NOTE THAT U = (Y-F)/F IS VERY SMALL AND THUS APPROXIMATING
+#--LOG(1+U) CAN BE VERY EFFICIENT.
+#--ALSO NOTE THAT THE VALUE 1/F IS STORED IN A TABLE SO THAT NO
+#--DIVISION IS NEEDED TO CALCULATE (Y-F)/F.
+
+#--GET K, Y, F, AND ADDRESS OF 1/F.
+	asr.l		&8,%d1
+	asr.l		&8,%d1			# SHIFTED 16 BITS, BIASED EXPO. OF X
+	sub.l		&0x3FFF,%d1		# THIS IS K
+	add.l		ADJK(%a6),%d1		# ADJUST K, ORIGINAL INPUT MAY BE  DENORM.
+	lea		LOGTBL(%pc),%a0		# BASE ADDRESS OF 1/F AND LOG(F)
+	fmov.l		%d1,%fp1		# CONVERT K TO FLOATING-POINT FORMAT
+
+#--WHILE THE CONVERSION IS GOING ON, WE GET F AND ADDRESS OF 1/F
+	mov.l		&0x3FFF0000,X(%a6)	# X IS NOW Y, I.E. 2^(-K)*X
+	mov.l		XFRAC(%a6),FFRAC(%a6)
+	and.l		&0xFE000000,FFRAC(%a6)	# FIRST 7 BITS OF Y
+	or.l		&0x01000000,FFRAC(%a6)	# GET F: ATTACH A 1 AT THE EIGHTH BIT
+	mov.l		FFRAC(%a6),%d1	# READY TO GET ADDRESS OF 1/F
+	and.l		&0x7E000000,%d1
+	asr.l		&8,%d1
+	asr.l		&8,%d1
+	asr.l		&4,%d1			# SHIFTED 20, D1 IS THE DISPLACEMENT
+	add.l		%d1,%a0			# A0 IS THE ADDRESS FOR 1/F
+
+	fmov.x		X(%a6),%fp0
+	mov.l		&0x3fff0000,F(%a6)
+	clr.l		F+8(%a6)
+	fsub.x		F(%a6),%fp0		# Y-F
+	fmovm.x		&0xc,-(%sp)		# SAVE FP2-3 WHILE FP0 IS NOT READY
+#--SUMMARY: FP0 IS Y-F, A0 IS ADDRESS OF 1/F, FP1 IS K
+#--REGISTERS SAVED: FPCR, FP1, FP2-3
+
+LP1CONT1:
+#--A RE-ENTRY POINT FOR LOGNP1
+	fmul.x		(%a0),%fp0		# FP0 IS U = (Y-F)/F
+	fmul.x		LOGOF2(%pc),%fp1	# GET K*LOG2 WHILE FP0 IS NOT READY
+	fmov.x		%fp0,%fp2
+	fmul.x		%fp2,%fp2		# FP2 IS V=U*U
+	fmov.x		%fp1,KLOG2(%a6)		# PUT K*LOG2 IN MEMORY, FREE FP1
+
+#--LOG(1+U) IS APPROXIMATED BY
+#--U + V*(A1+U*(A2+U*(A3+U*(A4+U*(A5+U*A6))))) WHICH IS
+#--[U + V*(A1+V*(A3+V*A5))]  +  [U*V*(A2+V*(A4+V*A6))]
+
+	fmov.x		%fp2,%fp3
+	fmov.x		%fp2,%fp1
+
+	fmul.d		LOGA6(%pc),%fp1		# V*A6
+	fmul.d		LOGA5(%pc),%fp2		# V*A5
+
+	fadd.d		LOGA4(%pc),%fp1		# A4+V*A6
+	fadd.d		LOGA3(%pc),%fp2		# A3+V*A5
+
+	fmul.x		%fp3,%fp1		# V*(A4+V*A6)
+	fmul.x		%fp3,%fp2		# V*(A3+V*A5)
+
+	fadd.d		LOGA2(%pc),%fp1		# A2+V*(A4+V*A6)
+	fadd.d		LOGA1(%pc),%fp2		# A1+V*(A3+V*A5)
+
+	fmul.x		%fp3,%fp1		# V*(A2+V*(A4+V*A6))
+	add.l		&16,%a0			# ADDRESS OF LOG(F)
+	fmul.x		%fp3,%fp2		# V*(A1+V*(A3+V*A5))
+
+	fmul.x		%fp0,%fp1		# U*V*(A2+V*(A4+V*A6))
+	fadd.x		%fp2,%fp0		# U+V*(A1+V*(A3+V*A5))
+
+	fadd.x		(%a0),%fp1		# LOG(F)+U*V*(A2+V*(A4+V*A6))
+	fmovm.x		(%sp)+,&0x30		# RESTORE FP2-3
+	fadd.x		%fp1,%fp0		# FP0 IS LOG(F) + LOG(1+U)
+
+	fmov.l		%d0,%fpcr
+	fadd.x		KLOG2(%a6),%fp0		# FINAL ADD
+	bra		t_inx2
+
+
+LOGNEAR1:
+
+# if the input is exactly equal to one, then exit through ld_pzero.
+# if these 2 lines weren't here, the correct answer would be returned
+# but the INEX2 bit would be set.
+	fcmp.b		%fp0,&0x1		# is it equal to one?
+	fbeq.l		ld_pzero		# yes
+
+#--REGISTERS SAVED: FPCR, FP1. FP0 CONTAINS THE INPUT.
+	fmov.x		%fp0,%fp1
+	fsub.s		one(%pc),%fp1		# FP1 IS X-1
+	fadd.s		one(%pc),%fp0		# FP0 IS X+1
+	fadd.x		%fp1,%fp1		# FP1 IS 2(X-1)
+#--LOG(X) = LOG(1+U/2)-LOG(1-U/2) WHICH IS AN ODD POLYNOMIAL
+#--IN U, U = 2(X-1)/(X+1) = FP1/FP0
+
+LP1CONT2:
+#--THIS IS A RE-ENTRY POINT FOR LOGNP1
+	fdiv.x		%fp0,%fp1		# FP1 IS U
+	fmovm.x		&0xc,-(%sp)		# SAVE FP2-3
+#--REGISTERS SAVED ARE NOW FPCR,FP1,FP2,FP3
+#--LET V=U*U, W=V*V, CALCULATE
+#--U + U*V*(B1 + V*(B2 + V*(B3 + V*(B4 + V*B5)))) BY
+#--U + U*V*(  [B1 + W*(B3 + W*B5)]  +  [V*(B2 + W*B4)]  )
+	fmov.x		%fp1,%fp0
+	fmul.x		%fp0,%fp0		# FP0 IS V
+	fmov.x		%fp1,SAVEU(%a6)		# STORE U IN MEMORY, FREE FP1
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS W
+
+	fmov.d		LOGB5(%pc),%fp3
+	fmov.d		LOGB4(%pc),%fp2
+
+	fmul.x		%fp1,%fp3		# W*B5
+	fmul.x		%fp1,%fp2		# W*B4
+
+	fadd.d		LOGB3(%pc),%fp3		# B3+W*B5
+	fadd.d		LOGB2(%pc),%fp2		# B2+W*B4
+
+	fmul.x		%fp3,%fp1		# W*(B3+W*B5), FP3 RELEASED
+
+	fmul.x		%fp0,%fp2		# V*(B2+W*B4)
+
+	fadd.d		LOGB1(%pc),%fp1		# B1+W*(B3+W*B5)
+	fmul.x		SAVEU(%a6),%fp0		# FP0 IS U*V
+
+	fadd.x		%fp2,%fp1		# B1+W*(B3+W*B5) + V*(B2+W*B4), FP2 RELEASED
+	fmovm.x		(%sp)+,&0x30		# FP2-3 RESTORED
+
+	fmul.x		%fp1,%fp0		# U*V*( [B1+W*(B3+W*B5)] + [V*(B2+W*B4)] )
+
+	fmov.l		%d0,%fpcr
+	fadd.x		SAVEU(%a6),%fp0
+	bra		t_inx2
+
+#--REGISTERS SAVED FPCR. LOG(-VE) IS INVALID
+LOGNEG:
+	bra		t_operr
+
+	global		slognd
+slognd:
+#--ENTRY POINT FOR LOG(X) FOR DENORMALIZED INPUT
+
+	mov.l		&-100,ADJK(%a6)		# INPUT = 2^(ADJK) * FP0
+
+#----normalize the input value by left shifting k bits (k to be determined
+#----below), adjusting exponent and storing -k to  ADJK
+#----the value TWOTO100 is no longer needed.
+#----Note that this code assumes the denormalized input is NON-ZERO.
+
+	movm.l		&0x3f00,-(%sp)		# save some registers  {d2-d7}
+	mov.l		(%a0),%d3		# D3 is exponent of smallest norm. #
+	mov.l		4(%a0),%d4
+	mov.l		8(%a0),%d5		# (D4,D5) is (Hi_X,Lo_X)
+	clr.l		%d2			# D2 used for holding K
+
+	tst.l		%d4
+	bne.b		Hi_not0
+
+Hi_0:
+	mov.l		%d5,%d4
+	clr.l		%d5
+	mov.l		&32,%d2
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6
+	lsl.l		%d6,%d4
+	add.l		%d6,%d2			# (D3,D4,D5) is normalized
+
+	mov.l		%d3,X(%a6)
+	mov.l		%d4,XFRAC(%a6)
+	mov.l		%d5,XFRAC+4(%a6)
+	neg.l		%d2
+	mov.l		%d2,ADJK(%a6)
+	fmov.x		X(%a6),%fp0
+	movm.l		(%sp)+,&0xfc		# restore registers {d2-d7}
+	lea		X(%a6),%a0
+	bra.w		LOGBGN			# begin regular log(X)
+
+Hi_not0:
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6		# find first 1
+	mov.l		%d6,%d2			# get k
+	lsl.l		%d6,%d4
+	mov.l		%d5,%d7			# a copy of D5
+	lsl.l		%d6,%d5
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d4			# (D3,D4,D5) normalized
+
+	mov.l		%d3,X(%a6)
+	mov.l		%d4,XFRAC(%a6)
+	mov.l		%d5,XFRAC+4(%a6)
+	neg.l		%d2
+	mov.l		%d2,ADJK(%a6)
+	fmov.x		X(%a6),%fp0
+	movm.l		(%sp)+,&0xfc		# restore registers {d2-d7}
+	lea		X(%a6),%a0
+	bra.w		LOGBGN			# begin regular log(X)
+
+	global		slognp1
+#--ENTRY POINT FOR LOG(1+X) FOR X FINITE, NON-ZERO, NOT NAN'S
+slognp1:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	fabs.x		%fp0			# test magnitude
+	fcmp.x		%fp0,LTHOLD(%pc)	# compare with min threshold
+	fbgt.w		LP1REAL			# if greater, continue
+	fmov.l		%d0,%fpcr
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		(%a0),%fp0		# return signed argument
+	bra		t_catch
+
+LP1REAL:
+	fmov.x		(%a0),%fp0		# LOAD INPUT
+	mov.l		&0x00000000,ADJK(%a6)
+	fmov.x		%fp0,%fp1		# FP1 IS INPUT Z
+	fadd.s		one(%pc),%fp0		# X := ROUND(1+Z)
+	fmov.x		%fp0,X(%a6)
+	mov.w		XFRAC(%a6),XDCARE(%a6)
+	mov.l		X(%a6),%d1
+	cmp.l		%d1,&0
+	ble.w		LP1NEG0			# LOG OF ZERO OR -VE
+	cmp.l		%d1,&0x3ffe8000		# IS X IN [1/2,3/2]?
+	blt.w		LOGMAIN
+	cmp.l		%d1,&0x3fffc000
+	bgt.w		LOGMAIN
+#--IF 1+Z > 3/2 OR 1+Z < 1/2, THEN X, THE ROUNDED VALUE OF 1+Z,
+#--CONTAINS AT LEAST 63 BITS OF INFORMATION OF Z. IN THAT CASE,
+#--SIMPLY INVOKE LOG(X) FOR LOG(1+Z).
+
+LP1NEAR1:
+#--NEXT SEE IF EXP(-1/16) < X < EXP(1/16)
+	cmp.l		%d1,&0x3ffef07d
+	blt.w		LP1CARE
+	cmp.l		%d1,&0x3fff8841
+	bgt.w		LP1CARE
+
+LP1ONE16:
+#--EXP(-1/16) < X < EXP(1/16). LOG(1+Z) = LOG(1+U/2) - LOG(1-U/2)
+#--WHERE U = 2Z/(2+Z) = 2Z/(1+X).
+	fadd.x		%fp1,%fp1		# FP1 IS 2Z
+	fadd.s		one(%pc),%fp0		# FP0 IS 1+X
+#--U = FP1/FP0
+	bra.w		LP1CONT2
+
+LP1CARE:
+#--HERE WE USE THE USUAL TABLE DRIVEN APPROACH. CARE HAS TO BE
+#--TAKEN BECAUSE 1+Z CAN HAVE 67 BITS OF INFORMATION AND WE MUST
+#--PRESERVE ALL THE INFORMATION. BECAUSE 1+Z IS IN [1/2,3/2],
+#--THERE ARE ONLY TWO CASES.
+#--CASE 1: 1+Z < 1, THEN K = -1 AND Y-F = (2-F) + 2Z
+#--CASE 2: 1+Z > 1, THEN K = 0  AND Y-F = (1-F) + Z
+#--ON RETURNING TO LP1CONT1, WE MUST HAVE K IN FP1, ADDRESS OF
+#--(1/F) IN A0, Y-F IN FP0, AND FP2 SAVED.
+
+	mov.l		XFRAC(%a6),FFRAC(%a6)
+	and.l		&0xFE000000,FFRAC(%a6)
+	or.l		&0x01000000,FFRAC(%a6)	# F OBTAINED
+	cmp.l		%d1,&0x3FFF8000		# SEE IF 1+Z > 1
+	bge.b		KISZERO
+
+KISNEG1:
+	fmov.s		TWO(%pc),%fp0
+	mov.l		&0x3fff0000,F(%a6)
+	clr.l		F+8(%a6)
+	fsub.x		F(%a6),%fp0		# 2-F
+	mov.l		FFRAC(%a6),%d1
+	and.l		&0x7E000000,%d1
+	asr.l		&8,%d1
+	asr.l		&8,%d1
+	asr.l		&4,%d1			# D1 CONTAINS DISPLACEMENT FOR 1/F
+	fadd.x		%fp1,%fp1		# GET 2Z
+	fmovm.x		&0xc,-(%sp)		# SAVE FP2  {%fp2/%fp3}
+	fadd.x		%fp1,%fp0		# FP0 IS Y-F = (2-F)+2Z
+	lea		LOGTBL(%pc),%a0		# A0 IS ADDRESS OF 1/F
+	add.l		%d1,%a0
+	fmov.s		negone(%pc),%fp1	# FP1 IS K = -1
+	bra.w		LP1CONT1
+
+KISZERO:
+	fmov.s		one(%pc),%fp0
+	mov.l		&0x3fff0000,F(%a6)
+	clr.l		F+8(%a6)
+	fsub.x		F(%a6),%fp0		# 1-F
+	mov.l		FFRAC(%a6),%d1
+	and.l		&0x7E000000,%d1
+	asr.l		&8,%d1
+	asr.l		&8,%d1
+	asr.l		&4,%d1
+	fadd.x		%fp1,%fp0		# FP0 IS Y-F
+	fmovm.x		&0xc,-(%sp)		# FP2 SAVED {%fp2/%fp3}
+	lea		LOGTBL(%pc),%a0
+	add.l		%d1,%a0			# A0 IS ADDRESS OF 1/F
+	fmov.s		zero(%pc),%fp1		# FP1 IS K = 0
+	bra.w		LP1CONT1
+
+LP1NEG0:
+#--FPCR SAVED. D1 IS X IN COMPACT FORM.
+	cmp.l		%d1,&0
+	blt.b		LP1NEG
+LP1ZERO:
+	fmov.s		negone(%pc),%fp0
+
+	fmov.l		%d0,%fpcr
+	bra		t_dz
+
+LP1NEG:
+	fmov.s		zero(%pc),%fp0
+
+	fmov.l		%d0,%fpcr
+	bra		t_operr
+
+	global		slognp1d
+#--ENTRY POINT FOR LOG(1+Z) FOR DENORMALIZED INPUT
+# Simply return the denorm
+slognp1d:
+	bra		t_extdnrm
+
+#########################################################################
+# satanh():  computes the inverse hyperbolic tangent of a norm input	#
+# satanhd(): computes the inverse hyperbolic tangent of a denorm input	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = arctanh(X)						#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 3 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	ATANH								#
+#	1. If |X| >= 1, go to 3.					#
+#									#
+#	2. (|X| < 1) Calculate atanh(X) by				#
+#		sgn := sign(X)						#
+#		y := |X|						#
+#		z := 2y/(1-y)						#
+#		atanh(X) := sgn * (1/2) * logp1(z)			#
+#		Exit.							#
+#									#
+#	3. If |X| > 1, go to 5.						#
+#									#
+#	4. (|X| = 1) Generate infinity with an appropriate sign and	#
+#		divide-by-zero by					#
+#		sgn := sign(X)						#
+#		atanh(X) := sgn / (+0).					#
+#		Exit.							#
+#									#
+#	5. (|X| > 1) Generate an invalid operation by 0 * infinity.	#
+#		Exit.							#
+#									#
+#########################################################################
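+#
+# A C sketch of the ATANH cases (double for extended; my_atanh is a
+# made-up name; the divisions/multiplications involving 0.0 and
+# HUGE_VAL deliberately raise the same IEEE exceptions the steps
+# above describe):
+#
+#	#include <math.h>
+#
+#	double my_atanh(double x)
+#	{
+#		double y = fabs(x), sgn = copysign(1.0, x);
+#		if (y < 1.0) {			/* usual case */
+#			double z = 2.0 * y / (1.0 - y);
+#			return sgn * 0.5 * log1p(z);
+#		}
+#		if (y == 1.0)
+#			return sgn / 0.0;	/* +-inf, divide-by-zero */
+#		return 0.0 * HUGE_VAL;		/* |x| > 1: NaN, invalid op */
+#	}
+#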
+
+	global		satanh
+satanh:
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	cmp.l		%d1,&0x3FFF8000
+	bge.b		ATANHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--Y = |X|, Z = 2Y/(1-Y), ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z).
+
+	fabs.x		(%a0),%fp0		# Y = |X|
+	fmov.x		%fp0,%fp1
+	fneg.x		%fp1			# -Y
+	fadd.x		%fp0,%fp0		# 2Y
+	fadd.s		&0x3F800000,%fp1	# 1-Y
+	fdiv.x		%fp1,%fp0		# 2Y/(1-Y)
+	mov.l		(%a0),%d1
+	and.l		&0x80000000,%d1
+	or.l		&0x3F000000,%d1		# SIGN(X)*HALF
+	mov.l		%d1,-(%sp)
+
+	mov.l		%d0,-(%sp)		# save rnd prec,mode
+	clr.l		%d0			# pass ext prec,RN
+	fmovm.x		&0x01,-(%sp)		# save Z on stack
+	lea		(%sp),%a0		# pass ptr to Z
+	bsr		slognp1			# LOG1P(Z)
+	add.l		&0xc,%sp		# clear Z from stack
+
+	mov.l		(%sp)+,%d0		# fetch old prec,mode
+	fmov.l		%d0,%fpcr		# load it
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.s		(%sp)+,%fp0
+	bra		t_catch
+
+ATANHBIG:
+	fabs.x		(%a0),%fp0		# |X|
+	fcmp.s		%fp0,&0x3F800000
+	fbgt		t_operr
+	bra		t_dz
+
+	global		satanhd
+#--ATANH(X) = X FOR DENORMALIZED X
+satanhd:
+	bra		t_extdnrm
+
+#########################################################################
+# slog10():  computes the base-10 logarithm of a normalized input	#
+# slog10d(): computes the base-10 logarithm of a denormalized input	#
+# slog2():   computes the base-2 logarithm of a normalized input	#
+# slog2d():  computes the base-2 logarithm of a denormalized input	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = log_10(X) or log_2(X)					#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 1.7 ulps in 64 significant bits,	#
+#	i.e. within 0.5003 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#       slog10d:							#
+#									#
+#       Step 0.	If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FPCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. Call slognd to obtain Y = log(X), the natural log of X.	#
+#       Notes:  Even if X is denormalized, log(X) is always normalized.	#
+#									#
+#       Step 2.  Compute log_10(X) = log(X) * (1/log(10)).		#
+#            2.1 Restore the user FPCR					#
+#            2.2 Return ans := Y * INV_L10.				#
+#									#
+#       slog10:								#
+#									#
+#       Step 0. If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FPCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. Call slogn to obtain Y = log(X), the natural log of X.	#
+#									#
+#       Step 2.   Compute log_10(X) = log(X) * (1/log(10)).		#
+#            2.1  Restore the user FPCR					#
+#            2.2  Return ans := Y * INV_L10.				#
+#									#
+#       slog2d:								#
+#									#
+#       Step 0. If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FPCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. Call slognd to obtain Y = log(X), the natural log of X.	#
+#       Notes:  Even if X is denormalized, log(X) is always normalized.	#
+#									#
+#       Step 2.   Compute log_2(X) = log(X) * (1/log(2)).		#
+#            2.1  Restore the user FPCR					#
+#            2.2  Return ans := Y * INV_L2.				#
+#									#
+#       slog2:								#
+#									#
+#       Step 0. If X < 0, create a NaN and raise the invalid operation	#
+#               flag. Otherwise, save FPCR in D1; set FPCR to default.	#
+#       Notes:  Default means round-to-nearest mode, no floating-point	#
+#               traps, and precision control = double extended.		#
+#									#
+#       Step 1. If X is not an integer power of two, i.e., X != 2^k,	#
+#               go to Step 3.						#
+#									#
+#       Step 2.   Return k.						#
+#            2.1  Get integer k, X = 2^k.				#
+#            2.2  Restore the user FPCR.				#
+#            2.3  Return ans := convert-to-double-extended(k).		#
+#									#
+#       Step 3. Call slogn to obtain Y = log(X), the natural log of X.	#
+#									#
+#       Step 4.   Compute log_2(X) = log(X) * (1/log(2)).		#
+#            4.1  Restore the user FPCR					#
+#            4.2  Return ans := Y * INV_L2.				#
+#									#
+#########################################################################
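+#
+# A C sketch of the scaling, with INV_L10/INV_L2 playing the role of
+# the stored reciprocals and my_log standing for the natural-log
+# routine sketched earlier (names illustrative only):
+#
+#	#include <math.h>
+#
+#	double my_log10(double x)
+#	{
+#		return my_log(x) * (1.0 / M_LN10);	/* Y * INV_L10 */
+#	}
+#
+#	double my_log2(double x)
+#	{
+#		int k;
+#		if (frexp(x, &k) == 0.5)	/* X == 2^(k-1) exactly */
+#			return (double)(k - 1);	/* return k, exact */
+#		return my_log(x) * (1.0 / M_LN2);	/* Y * INV_L2 */
+#	}
+#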
+
+INV_L10:
+	long		0x3FFD0000,0xDE5BD8A9,0x37287195,0x00000000
+
+INV_L2:
+	long		0x3FFF0000,0xB8AA3B29,0x5C17F0BC,0x00000000
+
+	global		slog10
+#--entry point for Log10(X), X is normalized
+slog10:
+	fmov.b		&0x1,%fp0
+	fcmp.x		%fp0,(%a0)		# if operand == 1,
+	fbeq.l		ld_pzero		# return an EXACT zero
+
+	mov.l		(%a0),%d1
+	blt.w		invalid
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slogn			# log(X), X normal.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L10(%pc),%fp0
+	bra		t_inx2
+
+	global		slog10d
+#--entry point for Log10(X), X is denormalized
+slog10d:
+	mov.l		(%a0),%d1
+	blt.w		invalid
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slognd			# log(X), X denorm.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L10(%pc),%fp0
+	bra		t_minx2
+
+	global		slog2
+#--entry point for Log2(X), X is normalized
+slog2:
+	mov.l		(%a0),%d1
+	blt.w		invalid
+
+	mov.l		8(%a0),%d1
+	bne.b		continue		# X is not 2^k
+
+	mov.l		4(%a0),%d1
+	and.l		&0x7FFFFFFF,%d1
+	bne.b		continue
+
+#--X = 2^k.
+	mov.w		(%a0),%d1
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x3FFF,%d1
+	beq.l		ld_pzero
+	fmov.l		%d0,%fpcr
+	fmov.l		%d1,%fp0
+	bra		t_inx2
+
+continue:
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slogn			# log(X), X normal.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L2(%pc),%fp0
+	bra		t_inx2
+
+invalid:
+	bra		t_operr
+
+	global		slog2d
+#--entry point for Log2(X), X is denormalized
+slog2d:
+	mov.l		(%a0),%d1
+	blt.w		invalid
+	mov.l		%d0,-(%sp)
+	clr.l		%d0
+	bsr		slognd			# log(X), X denorm.
+	fmov.l		(%sp)+,%fpcr
+	fmul.x		INV_L2(%pc),%fp0
+	bra		t_minx2
+
+#########################################################################
+# stwotox():  computes 2**X for a normalized input			#
+# stwotoxd(): computes 2**X for a denormalized input			#
+# stentox():  computes 10**X for a normalized input			#
+# stentoxd(): computes 10**X for a denormalized input			#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision input			#
+#	d0 = round precision,mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = 2**X or 10**X						#
+#									#
+# ACCURACY and MONOTONICITY *******************************************	#
+#	The returned result is within 2 ulps in 64 significant bits,	#
+#	i.e. within 0.5001 ulp to 53 bits if the result is subsequently	#
+#	rounded to double precision. The result is provably monotonic	#
+#	in double precision.						#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	twotox								#
+#	1. If |X| > 16480, go to ExpBig.				#
+#									#
+#	2. If |X| < 2**(-70), go to ExpSm.				#
+#									#
+#	3. Decompose X as X = N/64 + r where |r| <= 1/128. Furthermore	#
+#		decompose N as						#
+#		 N = 64(M + M') + j,  j = 0,1,2,...,63.			#
+#									#
+#	4. Overwrite r := r * log2. Then				#
+#		2**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).		#
+#		Go to expr to compute that expression.			#
+#									#
+#	tentox								#
+#	1. If |X| > 16480*log_10(2) (base 10 log of 2), go to ExpBig.	#
+#									#
+#	2. If |X| < 2**(-70), go to ExpSm.				#
+#									#
+#	3. Set y := X*log_2(10)*64 (base 2 log of 10). Set		#
+#		N := round-to-int(y). Decompose N as			#
+#		 N = 64(M + M') + j,  j = 0,1,2,...,63.			#
+#									#
+#	4. Define r as							#
+#		r := ((X - N*L1)-N*L2) * L10				#
+#		where L1, L2 are the leading and trailing parts of	#
+#		log_10(2)/64 and L10 is the natural log of 10. Then	#
+#		10**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r).		#
+#		Go to expr to compute that expression.			#
+#									#
+#	expr								#
+#	1. Fetch 2**(j/64) from table as Fact1 and Fact2.		#
+#									#
+#	2. Overwrite Fact1 and Fact2 by					#
+#		Fact1 := 2**(M) * Fact1					#
+#		Fact2 := 2**(M) * Fact2					#
+#		Thus Fact1 + Fact2 = 2**(M) * 2**(j/64).		#
+#									#
+#	3. Calculate P where 1 + P approximates exp(r):			#
+#		P = r + r*r*(A1+r*(A2+...+r*A5)).			#
+#									#
+#	4. Let AdjFact := 2**(M'). Return				#
+#		AdjFact * ( Fact1 + ((Fact1*P) + Fact2) ).		#
+#		Exit.							#
+#									#
+#	ExpBig								#
+#	1. Generate overflow by Huge * Huge if X > 0; otherwise,	#
+#	        generate underflow by Tiny * Tiny.			#
+#									#
+#	ExpSm								#
+#	1. Return 1 + X.						#
+#									#
+#########################################################################
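+#
+# A C sketch of the twotox reduction; the M/M' split above exists only
+# to keep the scale factors representable in extended format, so a
+# single ldexp stands in for it here. exp2_tbl[] and expm1_poly()
+# denote the TEXPTBL entries and the EXPA1..EXPA5 polynomial (names
+# illustrative only):
+#
+#	#include <math.h>
+#
+#	double my_exp2(double x)
+#	{
+#		int n = (int)nearbyint(64.0 * x);	/* N = round(64x) */
+#		int j = n & 63;				/* table index */
+#		int m = (n - j) / 64;			/* N = 64m + j */
+#		double r = (x - n / 64.0) * M_LN2;	/* |r| <= log2/128 */
+#		double f = exp2_tbl[j];			/* 2^(j/64) */
+#		/* 2^m * 2^(j/64) * exp(r), with exp(r) = 1 + P */
+#		return ldexp(f + f * expm1_poly(r), m);
+#	}
+#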
+
+L2TEN64:
+	long		0x406A934F,0x0979A371	# 64LOG10/LOG2
+L10TWO1:
+	long		0x3F734413,0x509F8000	# LOG2/64LOG10
+
+L10TWO2:
+	long		0xBFCD0000,0xC0219DC1,0xDA994FD2,0x00000000
+
+LOG10:	long		0x40000000,0x935D8DDD,0xAAA8AC17,0x00000000
+
+LOG2:	long		0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+EXPA5:	long		0x3F56C16D,0x6F7BD0B2
+EXPA4:	long		0x3F811112,0x302C712C
+EXPA3:	long		0x3FA55555,0x55554CC1
+EXPA2:	long		0x3FC55555,0x55554A54
+EXPA1:	long		0x3FE00000,0x00000000,0x00000000,0x00000000
+
+TEXPTBL:
+	long		0x3FFF0000,0x80000000,0x00000000,0x3F738000
+	long		0x3FFF0000,0x8164D1F3,0xBC030773,0x3FBEF7CA
+	long		0x3FFF0000,0x82CD8698,0xAC2BA1D7,0x3FBDF8A9
+	long		0x3FFF0000,0x843A28C3,0xACDE4046,0x3FBCD7C9
+	long		0x3FFF0000,0x85AAC367,0xCC487B15,0xBFBDE8DA
+	long		0x3FFF0000,0x871F6196,0x9E8D1010,0x3FBDE85C
+	long		0x3FFF0000,0x88980E80,0x92DA8527,0x3FBEBBF1
+	long		0x3FFF0000,0x8A14D575,0x496EFD9A,0x3FBB80CA
+	long		0x3FFF0000,0x8B95C1E3,0xEA8BD6E7,0xBFBA8373
+	long		0x3FFF0000,0x8D1ADF5B,0x7E5BA9E6,0xBFBE9670
+	long		0x3FFF0000,0x8EA4398B,0x45CD53C0,0x3FBDB700
+	long		0x3FFF0000,0x9031DC43,0x1466B1DC,0x3FBEEEB0
+	long		0x3FFF0000,0x91C3D373,0xAB11C336,0x3FBBFD6D
+	long		0x3FFF0000,0x935A2B2F,0x13E6E92C,0xBFBDB319
+	long		0x3FFF0000,0x94F4EFA8,0xFEF70961,0x3FBDBA2B
+	long		0x3FFF0000,0x96942D37,0x20185A00,0x3FBE91D5
+	long		0x3FFF0000,0x9837F051,0x8DB8A96F,0x3FBE8D5A
+	long		0x3FFF0000,0x99E04593,0x20B7FA65,0xBFBCDE7B
+	long		0x3FFF0000,0x9B8D39B9,0xD54E5539,0xBFBEBAAF
+	long		0x3FFF0000,0x9D3ED9A7,0x2CFFB751,0xBFBD86DA
+	long		0x3FFF0000,0x9EF53260,0x91A111AE,0xBFBEBEDD
+	long		0x3FFF0000,0xA0B0510F,0xB9714FC2,0x3FBCC96E
+	long		0x3FFF0000,0xA2704303,0x0C496819,0xBFBEC90B
+	long		0x3FFF0000,0xA43515AE,0x09E6809E,0x3FBBD1DB
+	long		0x3FFF0000,0xA5FED6A9,0xB15138EA,0x3FBCE5EB
+	long		0x3FFF0000,0xA7CD93B4,0xE965356A,0xBFBEC274
+	long		0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x3FBEA83C
+	long		0x3FFF0000,0xAB7A39B5,0xA93ED337,0x3FBECB00
+	long		0x3FFF0000,0xAD583EEA,0x42A14AC6,0x3FBE9301
+	long		0x3FFF0000,0xAF3B78AD,0x690A4375,0xBFBD8367
+	long		0x3FFF0000,0xB123F581,0xD2AC2590,0xBFBEF05F
+	long		0x3FFF0000,0xB311C412,0xA9112489,0x3FBDFB3C
+	long		0x3FFF0000,0xB504F333,0xF9DE6484,0x3FBEB2FB
+	long		0x3FFF0000,0xB6FD91E3,0x28D17791,0x3FBAE2CB
+	long		0x3FFF0000,0xB8FBAF47,0x62FB9EE9,0x3FBCDC3C
+	long		0x3FFF0000,0xBAFF5AB2,0x133E45FB,0x3FBEE9AA
+	long		0x3FFF0000,0xBD08A39F,0x580C36BF,0xBFBEAEFD
+	long		0x3FFF0000,0xBF1799B6,0x7A731083,0xBFBCBF51
+	long		0x3FFF0000,0xC12C4CCA,0x66709456,0x3FBEF88A
+	long		0x3FFF0000,0xC346CCDA,0x24976407,0x3FBD83B2
+	long		0x3FFF0000,0xC5672A11,0x5506DADD,0x3FBDF8AB
+	long		0x3FFF0000,0xC78D74C8,0xABB9B15D,0xBFBDFB17
+	long		0x3FFF0000,0xC9B9BD86,0x6E2F27A3,0xBFBEFE3C
+	long		0x3FFF0000,0xCBEC14FE,0xF2727C5D,0xBFBBB6F8
+	long		0x3FFF0000,0xCE248C15,0x1F8480E4,0xBFBCEE53
+	long		0x3FFF0000,0xD06333DA,0xEF2B2595,0xBFBDA4AE
+	long		0x3FFF0000,0xD2A81D91,0xF12AE45A,0x3FBC9124
+	long		0x3FFF0000,0xD4F35AAB,0xCFEDFA1F,0x3FBEB243
+	long		0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x3FBDE69A
+	long		0x3FFF0000,0xD99D15C2,0x78AFD7B6,0xBFB8BC61
+	long		0x3FFF0000,0xDBFBB797,0xDAF23755,0x3FBDF610
+	long		0x3FFF0000,0xDE60F482,0x5E0E9124,0xBFBD8BE1
+	long		0x3FFF0000,0xE0CCDEEC,0x2A94E111,0x3FBACB12
+	long		0x3FFF0000,0xE33F8972,0xBE8A5A51,0x3FBB9BFE
+	long		0x3FFF0000,0xE5B906E7,0x7C8348A8,0x3FBCF2F4
+	long		0x3FFF0000,0xE8396A50,0x3C4BDC68,0x3FBEF22F
+	long		0x3FFF0000,0xEAC0C6E7,0xDD24392F,0xBFBDBF4A
+	long		0x3FFF0000,0xED4F301E,0xD9942B84,0x3FBEC01A
+	long		0x3FFF0000,0xEFE4B99B,0xDCDAF5CB,0x3FBE8CAC
+	long		0x3FFF0000,0xF281773C,0x59FFB13A,0xBFBCBB3F
+	long		0x3FFF0000,0xF5257D15,0x2486CC2C,0x3FBEF73A
+	long		0x3FFF0000,0xF7D0DF73,0x0AD13BB9,0xBFB8B795
+	long		0x3FFF0000,0xFA83B2DB,0x722A033A,0x3FBEF84B
+	long		0x3FFF0000,0xFD3E0C0C,0xF486C175,0xBFBEF581
+
+	set		INT,L_SCR1
+
+	set		X,FP_SCR0
+	set		XDCARE,X+2
+	set		XFRAC,X+4
+
+	set		ADJFACT,FP_SCR0
+
+	set		FACT1,FP_SCR0
+	set		FACT1HI,FACT1+4
+	set		FACT1LOW,FACT1+8
+
+	set		FACT2,FP_SCR1
+	set		FACT2HI,FACT2+4
+	set		FACT2LOW,FACT2+8
+
+	global		stwotox
+#--ENTRY POINT FOR 2**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+stwotox:
+	fmovm.x		(%a0),&0x80		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	fmov.x		%fp0,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FB98000		# |X| >= 2**(-70)?
+	bge.b		TWOOK1
+	bra.w		EXPBORS
+
+TWOOK1:
+	cmp.l		%d1,&0x400D80C0		# |X| > 16480?
+	ble.b		TWOMAIN
+	bra.w		EXPBORS
+
+TWOMAIN:
+#--USUAL CASE, 2^(-70) <= |X| <= 16480
+
+	fmov.x		%fp0,%fp1
+	fmul.s		&0x42800000,%fp1	# 64 * X
+	fmov.l		%fp1,INT(%a6)		# N = ROUND-TO-INT(64 X)
+	mov.l		%d2,-(%sp)
+	lea		TEXPTBL(%pc),%a1	# LOAD ADDRESS OF TABLE OF 2^(J/64)
+	fmov.l		INT(%a6),%fp1		# N --> FLOATING FMT
+	mov.l		INT(%a6),%d1
+	mov.l		%d1,%d2
+	and.l		&0x3F,%d1		# D1 IS J
+	asl.l		&4,%d1			# DISPLACEMENT FOR 2^(J/64)
+	add.l		%d1,%a1			# ADDRESS FOR 2^(J/64)
+	asr.l		&6,%d2			# D2 IS L, N = 64L + J
+	mov.l		%d2,%d1
+	asr.l		&1,%d1			# D1 IS M
+	sub.l		%d1,%d2			# D2 IS M', N = 64(M+M') + J
+	add.l		&0x3FFF,%d2
+
+#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+#--D1 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+#--ADJFACT = 2^(M').
+#--REGISTERS SAVED SO FAR: D2 (FP2/FP3 ARE SAVED JUST BELOW).
+
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmul.s		&0x3C800000,%fp1	# (1/64)*N
+	mov.l		(%a1)+,FACT1(%a6)
+	mov.l		(%a1)+,FACT1HI(%a6)
+	mov.l		(%a1)+,FACT1LOW(%a6)
+	mov.w		(%a1)+,FACT2(%a6)
+
+	fsub.x		%fp1,%fp0		# X - (1/64)*INT(64 X)
+
+	mov.w		(%a1)+,FACT2HI(%a6)
+	clr.w		FACT2HI+2(%a6)
+	clr.l		FACT2LOW(%a6)
+	add.w		%d1,FACT1(%a6)
+	fmul.x		LOG2(%pc),%fp0		# FP0 IS R
+	add.w		%d1,FACT2(%a6)
+
+	bra.w		expr
+
+EXPBORS:
+#--FPCR, D0 SAVED
+	cmp.l		%d1,&0x3FFF8000
+	bgt.b		TEXPBIG
+
+#--|X| IS SMALL, RETURN 1 + X
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	fadd.s		&0x3F800000,%fp0	# RETURN 1 + X
+	bra		t_pinx2
+
+TEXPBIG:
+#--|X| IS LARGE, GENERATE OVERFLOW IF X > 0; ELSE GENERATE UNDERFLOW
+#--REGISTERS SAVED SO FAR ARE FPCR AND D0
+	mov.l		X(%a6),%d1
+	cmp.l		%d1,&0
+	blt.b		EXPNEG
+
+	bra		t_ovfl2			# t_ovfl expects positive value
+
+EXPNEG:
+	bra		t_unfl2			# t_unfl expects positive value
+
+	global		stwotoxd
+stwotoxd:
+#--ENTRY POINT FOR 2**(X) FOR DENORMALIZED ARGUMENT
+
+	fmov.l		%d0,%fpcr		# set user's rounding mode/precision
+	fmov.s		&0x3F800000,%fp0	# RETURN 1 + X
+	mov.l		(%a0),%d1
+	or.l		&0x00800001,%d1
+	fadd.s		%d1,%fp0
+	bra		t_pinx2
+
+	global		stentox
+#--ENTRY POINT FOR 10**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+stentox:
+	fmovm.x		(%a0),&0x80		# LOAD INPUT
+
+	mov.l		(%a0),%d1
+	mov.w		4(%a0),%d1
+	fmov.x		%fp0,X(%a6)
+	and.l		&0x7FFFFFFF,%d1
+
+	cmp.l		%d1,&0x3FB98000		# |X| >= 2**(-70)?
+	bge.b		TENOK1
+	bra.w		EXPBORS
+
+TENOK1:
+	cmp.l		%d1,&0x400B9B07		# |X| <= 16480*log2/log10 ?
+	ble.b		TENMAIN
+	bra.w		EXPBORS
+
+TENMAIN:
+#--USUAL CASE, 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10
+
+	fmov.x		%fp0,%fp1
+	fmul.d		L2TEN64(%pc),%fp1	# X*64*LOG10/LOG2
+	fmov.l		%fp1,INT(%a6)		# N=INT(X*64*LOG10/LOG2)
+	mov.l		%d2,-(%sp)
+	lea		TEXPTBL(%pc),%a1	# LOAD ADDRESS OF TABLE OF 2^(J/64)
+	fmov.l		INT(%a6),%fp1		# N --> FLOATING FMT
+	mov.l		INT(%a6),%d1
+	mov.l		%d1,%d2
+	and.l		&0x3F,%d1		# D1 IS J
+	asl.l		&4,%d1			# DISPLACEMENT FOR 2^(J/64)
+	add.l		%d1,%a1			# ADDRESS FOR 2^(J/64)
+	asr.l		&6,%d2			# D2 IS L, N = 64L + J
+	mov.l		%d2,%d1
+	asr.l		&1,%d1			# D1 IS M
+	sub.l		%d1,%d2			# D2 IS M', N = 64(M+M') + J
+	add.l		&0x3FFF,%d2
+
+#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+#--D1 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+#--ADJFACT = 2^(M').
+#--REGISTERS SAVED SO FAR: D2 (FP2/FP3 ARE SAVED JUST BELOW).
+	fmovm.x		&0x0c,-(%sp)		# save fp2/fp3
+
+	fmov.x		%fp1,%fp2
+
+	fmul.d		L10TWO1(%pc),%fp1	# N*(LOG2/64LOG10)_LEAD
+	mov.l		(%a1)+,FACT1(%a6)
+
+	fmul.x		L10TWO2(%pc),%fp2	# N*(LOG2/64LOG10)_TRAIL
+
+	mov.l		(%a1)+,FACT1HI(%a6)
+	mov.l		(%a1)+,FACT1LOW(%a6)
+	fsub.x		%fp1,%fp0		# X - N L_LEAD
+	mov.w		(%a1)+,FACT2(%a6)
+
+	fsub.x		%fp2,%fp0		# X - N L_TRAIL
+
+	mov.w		(%a1)+,FACT2HI(%a6)
+	clr.w		FACT2HI+2(%a6)
+	clr.l		FACT2LOW(%a6)
+
+	fmul.x		LOG10(%pc),%fp0		# FP0 IS R
+	add.w		%d1,FACT1(%a6)
+	add.w		%d1,FACT2(%a6)
+
+expr:
+#--FPCR, FP2, FP3 ARE SAVED IN ORDER AS SHOWN.
+#--ADJFACT CONTAINS 2**(M'), FACT1 + FACT2 = 2**(M) * 2**(J/64).
+#--FP0 IS R. THE FOLLOWING CODE COMPUTES
+#--	2**(M'+M) * 2**(J/64) * EXP(R)
+
+	fmov.x		%fp0,%fp1
+	fmul.x		%fp1,%fp1		# FP1 IS S = R*R
+
+	fmov.d		EXPA5(%pc),%fp2		# FP2 IS A5
+	fmov.d		EXPA4(%pc),%fp3		# FP3 IS A4
+
+	fmul.x		%fp1,%fp2		# FP2 IS S*A5
+	fmul.x		%fp1,%fp3		# FP3 IS S*A4
+
+	fadd.d		EXPA3(%pc),%fp2		# FP2 IS A3+S*A5
+	fadd.d		EXPA2(%pc),%fp3		# FP3 IS A2+S*A4
+
+	fmul.x		%fp1,%fp2		# FP2 IS S*(A3+S*A5)
+	fmul.x		%fp1,%fp3		# FP3 IS S*(A2+S*A4)
+
+	fadd.d		EXPA1(%pc),%fp2		# FP2 IS A1+S*(A3+S*A5)
+	fmul.x		%fp0,%fp3		# FP3 IS R*S*(A2+S*A4)
+
+	fmul.x		%fp1,%fp2		# FP2 IS S*(A1+S*(A3+S*A5))
+	fadd.x		%fp3,%fp0		# FP0 IS R+R*S*(A2+S*A4)
+	fadd.x		%fp2,%fp0		# FP0 IS EXP(R) - 1
+
+	fmovm.x		(%sp)+,&0x30		# restore fp2/fp3
+
+#--FINAL RECONSTRUCTION PROCESS
+#--2**X (OR 10**X) = 2^M*2^(J/64) + 2^M*2^(J/64)*(EXP(R)-1), SCALED BY ADJFACT
+
+	fmul.x		FACT1(%a6),%fp0
+	fadd.x		FACT2(%a6),%fp0
+	fadd.x		FACT1(%a6),%fp0
+
+	fmov.l		%d0,%fpcr		# restore users round prec,mode
+	mov.w		%d2,ADJFACT(%a6)	# INSERT EXPONENT
+	mov.l		(%sp)+,%d2
+	mov.l		&0x80000000,ADJFACT+4(%a6)
+	clr.l		ADJFACT+8(%a6)
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		ADJFACT(%a6),%fp0	# FINAL ADJUSTMENT
+	bra		t_catch
+
+	global		stentoxd
+stentoxd:
+#--ENTRY POINT FOR 10**(X) FOR DENORMALIZED ARGUMENT
+
+	fmov.l		%d0,%fpcr		# set user's rounding mode/precision
+	fmov.s		&0x3F800000,%fp0	# RETURN 1 + X
+	mov.l		(%a0),%d1
+	or.l		&0x00800001,%d1
+	fadd.s		%d1,%fp0
+	bra		t_pinx2
+
+#########################################################################
+# smovcr(): returns the ROM constant at the offset specified in d1	#
+#	    rounded to the mode and precision specified in d0.		#
+#									#
+# INPUT	***************************************************************	#
+#	d0 = rnd prec,mode						#
+#	d1 = ROM offset							#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = the ROM constant rounded to the user's rounding mode,prec	#
+#									#
+#########################################################################
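+#
+# A C sketch of the dispatch performed below; pi_tbl(), small_tbl() and
+# big_tbl() stand for picking the RN/RZRM/RP variant of the tables that
+# follow, and plus_zero for the exact +0.0 returned for undefined
+# offsets (all names illustrative only):
+#
+#	typedef struct { unsigned int w[3]; } ext96;	/* 96-bit constant */
+#
+#	const ext96 *movcr(int offset, int rmode, int *inexact)
+#	{
+#		*inexact = 1;
+#		if (offset == 0x00)			/* pi: always inexact */
+#			return pi_tbl(rmode);
+#		if (offset >= 0x0b && offset <= 0x0e) {	/* log10(2)..log10(e) */
+#			*inexact = (offset != 0x0e);	/* log10(e) is exact */
+#			return small_tbl(rmode) + (offset - 0x0b);
+#		}
+#		if (offset >= 0x30 && offset <= 0x3f) {	/* ln(2)..10^4096 */
+#			*inexact = !(offset >= 0x32 && offset <= 0x37);
+#			return big_tbl(rmode) + (offset - 0x30);
+#		}
+#		*inexact = 0;				/* undefined: exact +0 */
+#		return &plus_zero;
+#	}
+#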
+
+	global		smovcr
+smovcr:
+	mov.l		%d1,-(%sp)		# save rom offset for a sec
+
+	lsr.b		&0x4,%d0		# shift ctrl bits to lo
+	mov.l		%d0,%d1			# make a copy
+	andi.w		&0x3,%d1		# extract rnd mode
+	andi.w		&0xc,%d0		# extract rnd prec
+	swap		%d0			# put rnd prec in hi
+	mov.w		%d1,%d0			# put rnd mode in lo
+
+	mov.l		(%sp)+,%d1		# get rom offset
+
+#
+# check range of offset
+#
+	tst.b		%d1			# if zero, offset is to pi
+	beq.b		pi_tbl			# it is pi
+	cmpi.b		%d1,&0x0a		# check range $01 - $0a
+	ble.b		z_val			# if in this range, return zero
+	cmpi.b		%d1,&0x0e		# check range $0b - $0e
+	ble.b		sm_tbl			# valid constants in this range
+	cmpi.b		%d1,&0x2f		# check range $0f - $2f
+	ble.b		z_val			# if in this range, return zero
+	cmpi.b		%d1,&0x3f		# check range $30 - $3f
+	ble.b		bg_tbl			# valid constants in this range
+
+z_val:
+	bra.l		ld_pzero		# return a zero
+
+#
+# the answer is PI rounded to the proper precision.
+#
+# fetch a pointer to the answer table relating to the proper rounding
+# precision.
+#
+pi_tbl:
+	tst.b		%d0			# is rmode RN?
+	bne.b		pi_not_rn		# no
+pi_rn:
+	lea.l		PIRN(%pc),%a0		# yes; load PI RN table addr
+	bra.w		set_finx
+pi_not_rn:
+	cmpi.b		%d0,&rp_mode		# is rmode RP?
+	beq.b		pi_rp			# yes
+pi_rzrm:
+	lea.l		PIRZRM(%pc),%a0		# no; load PI RZ,RM table addr
+	bra.b		set_finx
+pi_rp:
+	lea.l		PIRP(%pc),%a0		# load PI RP table addr
+	bra.b		set_finx
+
+#
+# the answer is one of:
+#	$0B	log10(2)	(inexact)
+#	$0C	e		(inexact)
+#	$0D	log2(e)		(inexact)
+#	$0E	log10(e)	(exact)
+#
+# fetch a pointer to the answer table relating to the proper rounding
+# precision.
+#
+sm_tbl:
+	subi.b		&0xb,%d1		# make offset in 0-4 range
+	tst.b		%d0			# is rmode RN?
+	bne.b		sm_not_rn		# no
+sm_rn:
+	lea.l		SMALRN(%pc),%a0		# yes; load RN table addr
+sm_tbl_cont:
+	cmpi.b		%d1,&0x2		# is result log10(e)?
+	ble.b		set_finx		# no; answer is inexact
+	bra.b		no_finx			# yes; answer is exact
+sm_not_rn:
+	cmpi.b		%d0,&rp_mode		# is rmode RP?
+	beq.b		sm_rp			# yes
+sm_rzrm:
+	lea.l		SMALRZRM(%pc),%a0	# no; load RZ,RM table addr
+	bra.b		sm_tbl_cont
+sm_rp:
+	lea.l		SMALRP(%pc),%a0		# load RP table addr
+	bra.b		sm_tbl_cont
+
+#
+# the answer is one of:
+#	$30	ln(2)		(inexact)
+#	$31	ln(10)		(inexact)
+#	$32	10^0		(exact)
+#	$33	10^1		(exact)
+#	$34	10^2		(exact)
+#	$35	10^4		(exact)
+#	$36	10^8		(exact)
+#	$37	10^16		(exact)
+#	$38	10^32		(inexact)
+#	$39	10^64		(inexact)
+#	$3A	10^128		(inexact)
+#	$3B	10^256		(inexact)
+#	$3C	10^512		(inexact)
+#	$3D	10^1024		(inexact)
+#	$3E	10^2048		(inexact)
+#	$3F	10^4096		(inexact)
+#
+# fetch a pointer to the answer table relating to the proper rounding
+# precision.
+#
+bg_tbl:
+	subi.b		&0x30,%d1		# make offset in 0-f range
+	tst.b		%d0			# is rmode RN?
+	bne.b		bg_not_rn		# no
+bg_rn:
+	lea.l		BIGRN(%pc),%a0		# yes; load RN table addr
+bg_tbl_cont:
+	cmpi.b		%d1,&0x1		# is offset <= $31?
+	ble.b		set_finx		# yes; answer is inexact
+	cmpi.b		%d1,&0x7		# is $32 <= offset <= $37?
+	ble.b		no_finx			# yes; answer is exact
+	bra.b		set_finx		# no; answer is inexact
+bg_not_rn:
+	cmpi.b		%d0,&rp_mode		# is rmode RP?
+	beq.b		bg_rp			# yes
+bg_rzrm:
+	lea.l		BIGRZRM(%pc),%a0	# no; load RZ,RM table addr
+	bra.b		bg_tbl_cont
+bg_rp:
+	lea.l		BIGRP(%pc),%a0		# load RP table addr
+	bra.b		bg_tbl_cont
+
+# answer is inexact, so set INEX2 and AINEX in the user's FPSR.
+set_finx:
+	ori.l		&inx2a_mask,USER_FPSR(%a6) # set INEX2/AINEX
+no_finx:
+	mulu.w		&0xc,%d1		# offset points into tables
+	swap		%d0			# put rnd prec in lo word
+	tst.b		%d0			# is precision extended?
+
+	bne.b		not_ext			# no; round to sgl or dbl
+
+# Precision is extended
+	fmovm.x		(%a0,%d1.w),&0x80	# return result in fp0
+	rts
+
+# Precision is single or double
+not_ext:
+	swap		%d0			# rnd prec in upper word
+
+# call round() to round the answer to the proper precision.
+# exponents out of range for single or double DO NOT cause underflow
+# or overflow.
+	mov.w		0x0(%a0,%d1.w),FP_SCR1_EX(%a6) # load first word
+	mov.l		0x4(%a0,%d1.w),FP_SCR1_HI(%a6) # load second word
+	mov.l		0x8(%a0,%d1.w),FP_SCR1_LO(%a6) # load third word
+	mov.l		%d0,%d1
+	clr.l		%d0			# clear g,r,s
+	lea		FP_SCR1(%a6),%a0	# pass ptr to answer
+	clr.w		LOCAL_SGN(%a0)		# sign always positive
+	bsr.l		_round			# round the mantissa
+
+	fmovm.x		(%a0),&0x80		# return rounded result in fp0
+	rts
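+
+# In C terms, the lookup above behaves roughly like the sketch below
+# (illustrative only; the helper names are hypothetical, not part of
+# this package):
+#
+#	tbl = (mode == RN) ? tbl_rn :
+#	      (mode == RP) ? tbl_rp : tbl_rzrm; /* RZ,RM share a table  */
+#	c = tbl[offset];                        /* 12-byte ROM constant */
+#	if (!is_exact(offset))
+#	        fpsr |= INEX2 | AINEX;          /* the set_finx path    */
+#	if (prec == EXT)
+#	        return c;                       /* no rounding needed   */
+#	return _round(c, prec);                 /* cannot unfl/ovfl here */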
+
+	align		0x4
+
+PIRN:	long		0x40000000,0xc90fdaa2,0x2168c235	# pi
+PIRZRM:	long		0x40000000,0xc90fdaa2,0x2168c234	# pi
+PIRP:	long		0x40000000,0xc90fdaa2,0x2168c235	# pi
+
+SMALRN:	long		0x3ffd0000,0x9a209a84,0xfbcff798	# log10(2)
+	long		0x40000000,0xadf85458,0xa2bb4a9a	# e
+	long		0x3fff0000,0xb8aa3b29,0x5c17f0bc	# log2(e)
+	long		0x3ffd0000,0xde5bd8a9,0x37287195	# log10(e)
+	long		0x00000000,0x00000000,0x00000000	# 0.0
+
+SMALRZRM:
+	long		0x3ffd0000,0x9a209a84,0xfbcff798	# log10(2)
+	long		0x40000000,0xadf85458,0xa2bb4a9a	# e
+	long		0x3fff0000,0xb8aa3b29,0x5c17f0bb	# log2(e)
+	long		0x3ffd0000,0xde5bd8a9,0x37287195	# log10(e)
+	long		0x00000000,0x00000000,0x00000000	# 0.0
+
+SMALRP:	long		0x3ffd0000,0x9a209a84,0xfbcff799	# log10(2)
+	long		0x40000000,0xadf85458,0xa2bb4a9b	# e
+	long		0x3fff0000,0xb8aa3b29,0x5c17f0bc	# log2(e)
+	long		0x3ffd0000,0xde5bd8a9,0x37287195	# log10(e)
+	long		0x00000000,0x00000000,0x00000000	# 0.0
+
+BIGRN:	long		0x3ffe0000,0xb17217f7,0xd1cf79ac	# ln(2)
+	long		0x40000000,0x935d8ddd,0xaaa8ac17	# ln(10)
+
+	long		0x3fff0000,0x80000000,0x00000000	# 10 ^ 0
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
+
+BIGRZRM:
+	long		0x3ffe0000,0xb17217f7,0xd1cf79ab	# ln(2)
+	long		0x40000000,0x935d8ddd,0xaaa8ac16	# ln(10)
+
+	long		0x3fff0000,0x80000000,0x00000000	# 10 ^ 0
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59D	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CDF	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8D	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C6	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE4	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979A	# 10 ^ 4096
+
+BIGRP:
+	long		0x3ffe0000,0xb17217f7,0xd1cf79ac	# ln(2)
+	long		0x40000000,0x935d8ddd,0xaaa8ac17	# ln(10)
+
+	long		0x3fff0000,0x80000000,0x00000000	# 10 ^ 0
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D6	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C18	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
+
+#########################################################################
+# sscale(): computes the destination operand scaled by the source	#
+#	    operand. If the absolute value of the source operand is	#
+#	    >= 2^14, an overflow or underflow is returned.		#
+#									#
+# INPUT *************************************************************** #
+#	a0  = pointer to double-extended source operand X		#
+#	a1  = pointer to double-extended destination operand Y		#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 =  scale(X,Y)						#
+#									#
+#########################################################################
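+
+# Conceptually, the routine computes the following (a minimal C sketch
+# under the stated |X| < 2^14 restriction; not part of the original):
+#
+#	n = (long) X;               /* fintrz: truncate src to an int  */
+#	result = Y * exp2l(n);      /* done as a single fmul so that   */
+#	                            /* unfl/ovfl round/trap correctly  */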
+
+set	SIGN,		L_SCR1
+
+	global		sscale
+sscale:
+	mov.l		%d0,-(%sp)		# store off ctrl bits for now
+
+	mov.w		DST_EX(%a1),%d1		# get dst exponent
+	smi.b		SIGN(%a6)		# use SIGN to hold dst sign
+	andi.l		&0x00007fff,%d1		# strip sign from dst exp
+
+	mov.w		SRC_EX(%a0),%d0		# check src bounds
+	andi.w		&0x7fff,%d0		# clr src sign bit
+	cmpi.w		%d0,&0x3fff		# is |src| < 1?
+	blt.w		src_small		# yes
+	cmpi.w		%d0,&0x400c		# no; is src too big?
+	bgt.w		src_out			# yes
+
+#
+# Source is within 2^14 range.
+#
+src_ok:
+	fintrz.x	SRC(%a0),%fp0		# calc int of src
+	fmov.l		%fp0,%d0		# int src to d0
+# don't want any accrued bits from the fintrz showing up later since
+# we may need to read the fpsr for the last fp op in t_catch2().
+	fmov.l		&0x0,%fpsr
+
+	tst.b		DST_HI(%a1)		# is dst denormalized?
+	bmi.b		sok_norm
+
+# the dst is a DENORM. normalize the DENORM and add the adjustment to
+# the src value. then, jump to the norm part of the routine.
+sok_dnrm:
+	mov.l		%d0,-(%sp)		# save src for now
+
+	mov.w		DST_EX(%a1),FP_SCR0_EX(%a6) # make a copy
+	mov.l		DST_HI(%a1),FP_SCR0_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR0_LO(%a6)
+
+	lea		FP_SCR0(%a6),%a0	# pass ptr to DENORM
+	bsr.l		norm			# normalize the DENORM
+	neg.l		%d0
+	add.l		(%sp)+,%d0		# add adjustment to src
+
+	fmovm.x		FP_SCR0(%a6),&0x80	# load normalized DENORM
+
+	cmpi.w		%d0,&-0x3fff		# is the shift amt really low?
+	bge.b		sok_norm2		# thank goodness no
+
+# the multiply factor that we're trying to create should be a denorm
+# for the multiply to work. therefore, we're going to actually do a
+# multiply with a denorm which will cause an unimplemented data type
+# exception to be put into the machine which will be caught and corrected
+# later. we don't do this with the DENORMs above because this method
+# is slower. but, don't fret, I don't see it being used much either.
+	fmov.l		(%sp)+,%fpcr		# restore user fpcr
+	mov.l		&0x80000000,%d1		# load normalized mantissa
+	subi.l		&-0x3fff,%d0		# how many should we shift?
+	neg.l		%d0			# make it positive
+	cmpi.b		%d0,&0x20		# is it > 32?
+	bge.b		sok_dnrm_32		# yes
+	lsr.l		%d0,%d1			# no; bit stays in upper lw
+	clr.l		-(%sp)			# insert zero low mantissa
+	mov.l		%d1,-(%sp)		# insert new high mantissa
+	clr.l		-(%sp)			# make zero exponent
+	bra.b		sok_norm_cont
+sok_dnrm_32:
+	subi.b		&0x20,%d0		# get shift count
+	lsr.l		%d0,%d1			# make low mantissa longword
+	mov.l		%d1,-(%sp)		# insert new low mantissa
+	clr.l		-(%sp)			# insert zero high mantissa
+	clr.l		-(%sp)			# make zero exponent
+	bra.b		sok_norm_cont
+
+# the dst is a NORM. load the dst into fp0 and fall through to create
+# the 2^(src) multiply factor.
+sok_norm:
+	fmovm.x		DST(%a1),&0x80		# load fp0 with normalized src
+sok_norm2:
+	fmov.l		(%sp)+,%fpcr		# restore user fpcr
+
+	addi.w		&0x3fff,%d0		# turn src amt into exp value
+	swap		%d0			# put exponent in high word
+	clr.l		-(%sp)			# insert zero lo mantissa
+	mov.l		&0x80000000,-(%sp)	# insert new high mantissa
+	mov.l		%d0,-(%sp)		# insert new exponent
+
+sok_norm_cont:
+	fmov.l		%fpcr,%d0		# d0 needs fpcr for t_catch2
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		(%sp)+,%fp0		# do the multiply
+	bra		t_catch2		# catch any exceptions
+
+#
+# Source is outside of 2^14 range.  Test the sign and branch
+# to the appropriate exception handler.
+#
+src_out:
+	mov.l		(%sp)+,%d0		# restore ctrl bits
+	exg		%a0,%a1			# swap src,dst ptrs
+	tst.b		SRC_EX(%a1)		# is src negative?
+	bmi		t_unfl			# yes; underflow
+	bra		t_ovfl_sc		# no; overflow
+
+#
+# The source magnitude is < 1, so the scale factor is zero: simply
+# return the dst, routing a denormalized dst through t_resdnrm.
+#
+src_small:
+	tst.b		DST_HI(%a1)		# is dst denormalized?
+	bpl.b		ssmall_done		# yes
+
+	mov.l		(%sp)+,%d0
+	fmov.l		%d0,%fpcr		# no; load control bits
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		DST(%a1),%fp0		# simply return dest
+	bra		t_catch2
+ssmall_done:
+	mov.l		(%sp)+,%d0		# load control bits into d1
+	mov.l		%a1,%a0			# pass ptr to dst
+	bra		t_resdnrm
+
+#########################################################################
+# smod(): computes the fp MOD of the input values X,Y.			#
+# srem(): computes the fp (IEEE) REM of the input values X,Y.		#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision input X			#
+#	a1 = pointer to extended precision input Y			#
+#	d0 = round precision,mode					#
+#									#
+#	The input operands X and Y can be either normalized or		#
+#	denormalized.							#
+#									#
+# OUTPUT ************************************************************** #
+#      fp0 = FREM(X,Y) or FMOD(X,Y)					#
+#									#
+# ALGORITHM *********************************************************** #
+#									#
+#       Step 1.  Save and strip signs of X and Y: signX := sign(X),	#
+#                signY := sign(Y), X := |X|, Y := |Y|,			#
+#                signQ := signX EOR signY. Record whether MOD or REM	#
+#                is requested.						#
+#									#
+#       Step 2.  Set L := expo(X)-expo(Y), k := 0, Q := 0.		#
+#                If (L < 0) then					#
+#                   R := X, go to Step 4.				#
+#                else							#
+#                   R := 2^(-L)X, j := L.				#
+#                endif							#
+#									#
+#       Step 3.  Perform MOD(X,Y)					#
+#            3.1 If R = Y, go to Step 9.				#
+#            3.2 If R > Y, then { R := R - Y, Q := Q + 1}		#
+#            3.3 If j = 0, go to Step 4.				#
+#            3.4 k := k + 1, j := j - 1, Q := 2Q, R := 2R. Go to	#
+#                Step 3.1.						#
+#									#
+#       Step 4.  At this point, R = X - QY = MOD(X,Y). Set		#
+#                Last_Subtract := false (used in Step 7 below). If	#
+#                MOD is requested, go to Step 6.			#
+#									#
+#       Step 5.  R = MOD(X,Y), but REM(X,Y) is requested.		#
+#            5.1 If R < Y/2, then R = MOD(X,Y) = REM(X,Y). Go to	#
+#                Step 6.						#
+#            5.2 If R > Y/2, then { set Last_Subtract := true,		#
+#                Q := Q + 1, Y := signY*Y }. Go to Step 6.		#
+#            5.3 This is the tricky case of R = Y/2. If Q is odd,	#
+#                then { Q := Q + 1, signX := -signX }.			#
+#									#
+#       Step 6.  R := signX*R.						#
+#									#
+#       Step 7.  If Last_Subtract = true, R := R - Y.			#
+#									#
+#       Step 8.  Return signQ, last 7 bits of Q, and R as required.	#
+#									#
+#       Step 9.  At this point, R = 2^(-j)*X - Q Y = Y. Thus,		#
+#                X = 2^(j)*(Q+1)Y. set Q := 2^(j)*(Q+1),		#
+#                R := 0. Return signQ, last 7 bits of Q, and R.		#
+#									#
+#########################################################################
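+
+# The loop below follows Steps 2-5 and 9 above; in C-like terms
+# (magnitudes only; an illustrative sketch, not part of the package):
+#
+#	L = expo(X) - expo(Y); Q = 0; R = X;
+#	if (L >= 0) {
+#	    R = scalb(X, -L);                   /* align R with Y      */
+#	    for (j = L; ; j--) {
+#	        if (R == Y) {                   /* Step 9              */
+#	            Q = (Q + 1) << j; R = 0; break;
+#	        }
+#	        if (R > Y) { R -= Y; Q++; }     /* Step 3.2            */
+#	        if (j == 0) break;              /* Step 3.3            */
+#	        Q <<= 1; R += R;                /* Step 3.4            */
+#	    }
+#	}
+#	if (REM &&                              /* round to nearest,   */
+#	    (R > Y/2 || (R == Y/2 && (Q & 1)))) /* ties to even Q      */
+#	    { Q++; R -= Y; }                    /* Steps 5 and 7       */
+#	/* only the last 7 bits of Q are kept (Step 8) */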
+
+	set		Mod_Flag,L_SCR3
+	set		Sc_Flag,L_SCR3+1
+
+	set		SignY,L_SCR2
+	set		SignX,L_SCR2+2
+	set		SignQ,L_SCR3+2
+
+	set		Y,FP_SCR0
+	set		Y_Hi,Y+4
+	set		Y_Lo,Y+8
+
+	set		R,FP_SCR1
+	set		R_Hi,R+4
+	set		R_Lo,R+8
+
+Scale:
+	long		0x00010000,0x80000000,0x00000000,0x00000000
+
+	global		smod
+smod:
+	clr.b		FPSR_QBYTE(%a6)
+	mov.l		%d0,-(%sp)		# save ctrl bits
+	clr.b		Mod_Flag(%a6)
+	bra.b		Mod_Rem
+
+	global		srem
+srem:
+	clr.b		FPSR_QBYTE(%a6)
+	mov.l		%d0,-(%sp)		# save ctrl bits
+	mov.b		&0x1,Mod_Flag(%a6)
+
+Mod_Rem:
+#..Save sign of X and Y
+	movm.l		&0x3f00,-(%sp)		# save data registers
+	mov.w		SRC_EX(%a0),%d3
+	mov.w		%d3,SignY(%a6)
+	and.l		&0x00007FFF,%d3		# Y := |Y|
+
+#
+	mov.l		SRC_HI(%a0),%d4
+	mov.l		SRC_LO(%a0),%d5		# (D3,D4,D5) is |Y|
+
+	tst.l		%d3
+	bne.b		Y_Normal
+
+	mov.l		&0x00003FFE,%d3		# $3FFD + 1
+	tst.l		%d4
+	bne.b		HiY_not0
+
+HiY_0:
+	mov.l		%d5,%d4
+	clr.l		%d5
+	sub.l		&32,%d3
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6
+	lsl.l		%d6,%d4
+	sub.l		%d6,%d3			# (D3,D4,D5) is normalized
+#	                                        ...with bias $7FFD
+	bra.b		Chk_X
+
+HiY_not0:
+	clr.l		%d6
+	bfffo		%d4{&0:&32},%d6
+	sub.l		%d6,%d3
+	lsl.l		%d6,%d4
+	mov.l		%d5,%d7			# a copy of D5
+	lsl.l		%d6,%d5
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d4			# (D3,D4,D5) normalized
+#                                       ...with bias $7FFD
+	bra.b		Chk_X
+
+Y_Normal:
+	add.l		&0x00003FFE,%d3		# (D3,D4,D5) normalized
+#                                       ...with bias $7FFD
+
+Chk_X:
+	mov.w		DST_EX(%a1),%d0
+	mov.w		%d0,SignX(%a6)
+	mov.w		SignY(%a6),%d1
+	eor.l		%d0,%d1
+	and.l		&0x00008000,%d1
+	mov.w		%d1,SignQ(%a6)		# sign(Q) obtained
+	and.l		&0x00007FFF,%d0
+	mov.l		DST_HI(%a1),%d1
+	mov.l		DST_LO(%a1),%d2		# (D0,D1,D2) is |X|
+	tst.l		%d0
+	bne.b		X_Normal
+	mov.l		&0x00003FFE,%d0
+	tst.l		%d1
+	bne.b		HiX_not0
+
+HiX_0:
+	mov.l		%d2,%d1
+	clr.l		%d2
+	sub.l		&32,%d0
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	lsl.l		%d6,%d1
+	sub.l		%d6,%d0			# (D0,D1,D2) is normalized
+#                                       ...with bias $7FFD
+	bra.b		Init
+
+HiX_not0:
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	sub.l		%d6,%d0
+	lsl.l		%d6,%d1
+	mov.l		%d2,%d7			# a copy of D2
+	lsl.l		%d6,%d2
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d1			# (D0,D1,D2) normalized
+#                                       ...with bias $7FFD
+	bra.b		Init
+
+X_Normal:
+	add.l		&0x00003FFE,%d0		# (D0,D1,D2) normalized
+#                                       ...with bias $7FFD
+
+Init:
+#
+	mov.l		%d3,L_SCR1(%a6)		# save biased exp(Y)
+	mov.l		%d0,-(%sp)		# save biased exp(X)
+	sub.l		%d3,%d0			# L := expo(X)-expo(Y)
+
+	clr.l		%d6			# D6 := carry <- 0
+	clr.l		%d3			# D3 is Q
+	mov.l		&0,%a1			# A1 is k; j+k=L, Q=0
+
+#..(Carry,D1,D2) is R
+	tst.l		%d0
+	bge.b		Mod_Loop_pre
+
+#..expo(X) < expo(Y). Thus X = mod(X,Y)
+#
+	mov.l		(%sp)+,%d0		# restore d0
+	bra.w		Get_Mod
+
+Mod_Loop_pre:
+	addq.l		&0x4,%sp		# erase exp(X)
+#..At this point  R = 2^(-L)X; Q = 0; k = 0; and  k+j = L
+Mod_Loop:
+	tst.l		%d6			# test carry bit
+	bgt.b		R_GT_Y
+
+#..At this point carry = 0, R = (D1,D2), Y = (D4,D5)
+	cmp.l		%d1,%d4			# compare hi(R) and hi(Y)
+	bne.b		R_NE_Y
+	cmp.l		%d2,%d5			# compare lo(R) and lo(Y)
+	bne.b		R_NE_Y
+
+#..At this point, R = Y
+	bra.w		Rem_is_0
+
+R_NE_Y:
+#..use the borrow of the previous compare
+	bcs.b		R_LT_Y			# borrow is set iff R < Y
+
+R_GT_Y:
+#..If Carry is set, then Y < (Carry,D1,D2) < 2Y. Otherwise, Carry = 0
+#..and Y < (D1,D2) < 2Y. Either way, perform R - Y
+	sub.l		%d5,%d2			# lo(R) - lo(Y)
+	subx.l		%d4,%d1			# hi(R) - hi(Y)
+	clr.l		%d6			# clear carry
+	addq.l		&1,%d3			# Q := Q + 1
+
+R_LT_Y:
+#..At this point, Carry=0, R < Y. R = 2^(k-L)X - QY; k+j = L; j >= 0.
+	tst.l		%d0			# see if j = 0.
+	beq.b		PostLoop
+
+	add.l		%d3,%d3			# Q := 2Q
+	add.l		%d2,%d2			# lo(R) = 2lo(R)
+	roxl.l		&1,%d1			# hi(R) = 2hi(R) + carry
+	scs		%d6			# set Carry if 2(R) overflows
+	addq.l		&1,%a1			# k := k+1
+	subq.l		&1,%d0			# j := j - 1
+#..At this point, R=(Carry,D1,D2) = 2^(k-L)X - QY, j+k=L, j >= 0, R < 2Y.
+
+	bra.b		Mod_Loop
+
+PostLoop:
+#..k = L, j = 0, Carry = 0, R = (D1,D2) = X - QY, R < Y.
+
+#..normalize R.
+	mov.l		L_SCR1(%a6),%d0		# new biased expo of R
+	tst.l		%d1
+	bne.b		HiR_not0
+
+HiR_0:
+	mov.l		%d2,%d1
+	clr.l		%d2
+	sub.l		&32,%d0
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	lsl.l		%d6,%d1
+	sub.l		%d6,%d0			# (D0,D1,D2) is normalized
+#                                       ...with bias $7FFD
+	bra.b		Get_Mod
+
+HiR_not0:
+	clr.l		%d6
+	bfffo		%d1{&0:&32},%d6
+	bmi.b		Get_Mod			# already normalized
+	sub.l		%d6,%d0
+	lsl.l		%d6,%d1
+	mov.l		%d2,%d7			# a copy of D2
+	lsl.l		%d6,%d2
+	neg.l		%d6
+	add.l		&32,%d6
+	lsr.l		%d6,%d7
+	or.l		%d7,%d1			# (D0,D1,D2) normalized
+
+#
+Get_Mod:
+	cmp.l		%d0,&0x000041FE
+	bge.b		No_Scale
+Do_Scale:
+	mov.w		%d0,R(%a6)
+	mov.l		%d1,R_Hi(%a6)
+	mov.l		%d2,R_Lo(%a6)
+	mov.l		L_SCR1(%a6),%d6
+	mov.w		%d6,Y(%a6)
+	mov.l		%d4,Y_Hi(%a6)
+	mov.l		%d5,Y_Lo(%a6)
+	fmov.x		R(%a6),%fp0		# no exception
+	mov.b		&1,Sc_Flag(%a6)
+	bra.b		ModOrRem
+No_Scale:
+	mov.l		%d1,R_Hi(%a6)
+	mov.l		%d2,R_Lo(%a6)
+	sub.l		&0x3FFE,%d0
+	mov.w		%d0,R(%a6)
+	mov.l		L_SCR1(%a6),%d6
+	sub.l		&0x3FFE,%d6
+	mov.l		%d6,L_SCR1(%a6)
+	fmov.x		R(%a6),%fp0
+	mov.w		%d6,Y(%a6)
+	mov.l		%d4,Y_Hi(%a6)
+	mov.l		%d5,Y_Lo(%a6)
+	clr.b		Sc_Flag(%a6)
+
+#
+ModOrRem:
+	tst.b		Mod_Flag(%a6)
+	beq.b		Fix_Sign
+
+	mov.l		L_SCR1(%a6),%d6		# new biased expo(Y)
+	subq.l		&1,%d6			# biased expo(Y/2)
+	cmp.l		%d0,%d6
+	blt.b		Fix_Sign
+	bgt.b		Last_Sub
+
+	cmp.l		%d1,%d4
+	bne.b		Not_EQ
+	cmp.l		%d2,%d5
+	bne.b		Not_EQ
+	bra.w		Tie_Case
+
+Not_EQ:
+	bcs.b		Fix_Sign
+
+Last_Sub:
+#
+	fsub.x		Y(%a6),%fp0		# no exceptions
+	addq.l		&1,%d3			# Q := Q + 1
+
+#
+Fix_Sign:
+#..Get sign of X
+	mov.w		SignX(%a6),%d6
+	bge.b		Get_Q
+	fneg.x		%fp0
+
+#..Get Q
+#
+Get_Q:
+	clr.l		%d6
+	mov.w		SignQ(%a6),%d6		# D6 is sign(Q)
+	mov.l		&8,%d7
+	lsr.l		%d7,%d6
+	and.l		&0x0000007F,%d3		# 7 bits of Q
+	or.l		%d6,%d3			# sign and bits of Q
+#	swap		%d3
+#	fmov.l		%fpsr,%d6
+#	and.l		&0xFF00FFFF,%d6
+#	or.l		%d3,%d6
+#	fmov.l		%d6,%fpsr		# put Q in fpsr
+	mov.b		%d3,FPSR_QBYTE(%a6)	# put Q in fpsr
+
+#
+Restore:
+	movm.l		(%sp)+,&0xfc		#  {%d2-%d7}
+	mov.l		(%sp)+,%d0
+	fmov.l		%d0,%fpcr
+	tst.b		Sc_Flag(%a6)
+	beq.b		Finish
+	mov.b		&FMUL_OP,%d1		# last inst is MUL
+	fmul.x		Scale(%pc),%fp0		# may cause underflow
+	bra		t_catch2
+# the '040 package did this apparently to see if the dst operand for the
+# preceding fmul was a denorm. but, it better not have been since the
+# algorithm just got done playing with fp0 and expected no exceptions
+# as a result. trust me...
+#	bra		t_avoid_unsupp		# check for denorm as a
+#						;result of the scaling
+
+Finish:
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		%fp0,%fp0		# capture exceptions & round
+	bra		t_catch2
+
+Rem_is_0:
+#..R = 2^(-j)X - Q Y = Y, thus R = 0 and quotient = 2^j (Q+1)
+	addq.l		&1,%d3
+	cmp.l		%d0,&8			# D0 is j
+	bge.b		Q_Big
+
+	lsl.l		%d0,%d3
+	bra.b		Set_R_0
+
+Q_Big:
+	clr.l		%d3
+
+Set_R_0:
+	fmov.s		&0x00000000,%fp0
+	clr.b		Sc_Flag(%a6)
+	bra.w		Fix_Sign
+
+Tie_Case:
+#..Check parity of Q
+	mov.l		%d3,%d6
+	and.l		&0x00000001,%d6
+	tst.l		%d6
+	beq.w		Fix_Sign		# Q is even
+
+#..Q is odd, Q := Q + 1, signX := -signX
+	addq.l		&1,%d3
+	mov.w		SignX(%a6),%d6
+	eor.l		&0x00008000,%d6
+	mov.w		%d6,SignX(%a6)
+	bra.w		Fix_Sign
+
+qnan:	long		0x7fff0000, 0xffffffff, 0xffffffff
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	t_dz(): Handle DZ exception during transcendental emulation.	#
+#	        Sets N bit according to sign of source operand.		#
+#	t_dz2(): Handle DZ exception during transcendental emulation.	#
+#		 Sets N bit always.					#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to source operand					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	- Store properly signed INF into fp0.				#
+#	- Set FPSR exception status dz bit, ccode inf bit, and		#
+#	  accrued dz bit.						#
+#									#
+#########################################################################
+
+	global		t_dz
+t_dz:
+	tst.b		SRC_EX(%a0)		# is src negative?
+	bmi.b		t_dz2			# yes
+
+dz_pinf:
+	fmov.s		&0x7f800000,%fp0	# return +INF in fp0
+	ori.l		&dzinf_mask,USER_FPSR(%a6) # set I/DZ/ADZ
+	rts
+
+	global		t_dz2
+t_dz2:
+	fmov.s		&0xff800000,%fp0	# return -INF in fp0
+	ori.l		&dzinf_mask+neg_mask,USER_FPSR(%a6) # set N/I/DZ/ADZ
+	rts
+
+#################################################################
+# OPERR exception:						#
+#	- set FPSR exception status operr bit, condition code	#
+#	  nan bit; Store default NAN into fp0			#
+#################################################################
+	global		t_operr
+t_operr:
+	ori.l		&opnan_mask,USER_FPSR(%a6) # set NaN/OPERR/AIOP
+	fmovm.x		qnan(%pc),&0x80		# return default NAN in fp0
+	rts
+
+#################################################################
+# Extended DENORM:						#
+#	- For all functions that have a denormalized input and	#
+#	  that f(x)=x, this is the entry point.			#
+#	- we only return the EXOP here if either underflow or	#
+#	  inexact is enabled.					#
+#################################################################
+
+# Entry point for scale w/ extended denorm. The function does
+# NOT set INEX2/AUNFL/AINEX.
+	global		t_resdnrm
+t_resdnrm:
+	ori.l		&unfl_mask,USER_FPSR(%a6) # set UNFL
+	bra.b		xdnrm_con
+
+	global		t_extdnrm
+t_extdnrm:
+	ori.l		&unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
+
+xdnrm_con:
+	mov.l		%a0,%a1			# make copy of src ptr
+	mov.l		%d0,%d1			# make copy of rnd prec,mode
+	andi.b		&0xc0,%d1		# extended precision?
+	bne.b		xdnrm_sd		# no
+
+# result precision is extended.
+	tst.b		LOCAL_EX(%a0)		# is denorm negative?
+	bpl.b		xdnrm_exit		# no
+
+	bset		&neg_bit,FPSR_CC(%a6)	# yes; set 'N' ccode bit
+	bra.b		xdnrm_exit
+
+# result precision is single or double
+xdnrm_sd:
+	mov.l		%a1,-(%sp)
+	tst.b		LOCAL_EX(%a0)		# is denorm pos or neg?
+	smi.b		%d1			# set d1 accordingly
+	bsr.l		unf_sub
+	mov.l		(%sp)+,%a1
+xdnrm_exit:
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+
+	mov.b		FPCR_ENABLE(%a6),%d0
+	andi.b		&0x0a,%d0		# is UNFL or INEX enabled?
+	bne.b		xdnrm_ena		# yes
+	rts
+
+################
+# unfl enabled #
+################
+# we have a DENORM that needs to be converted into an EXOP.
+# so, normalize the mantissa, add 0x6000 to the new exponent,
+# and return the result in fp1.
+xdnrm_ena:
+	mov.w		LOCAL_EX(%a1),FP_SCR0_EX(%a6)
+	mov.l		LOCAL_HI(%a1),FP_SCR0_HI(%a6)
+	mov.l		LOCAL_LO(%a1),FP_SCR0_LO(%a6)
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize mantissa
+	addi.l		&0x6000,%d0		# add extra bias
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# keep old sign
+	or.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
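+
+# In C-like terms (illustrative only; norm()'s return convention is
+# assumed here), the EXOP construction above is:
+#
+#	k = norm(&m);                   /* left-justify the mantissa   */
+#	exp = (k + 0x6000) | sign;      /* re-bias into normal range   */
+#	fp1 = make_ext(exp, m);         /* exceptional operand (EXOP)  */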
+
+#################################################################
+# UNFL exception:						#
+#	- This routine is for cases where even an EXOP isn't	#
+#	  large enough to hold the range of this result.	#
+#	  In such a case, the EXOP equals zero.			#
+#	- Return the default result to the proper precision	#
+#	  with the sign of this result being the same as that	#
+#	  of the src operand.					#
+#	- t_unfl2() is provided to force the result sign to	#
+#	  positive which is the desired result for fetox().	#
+#################################################################
+	global		t_unfl
+t_unfl:
+	ori.l		&unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
+
+	tst.b		(%a0)			# is result pos or neg?
+	smi.b		%d1			# set d1 accordingly
+	bsr.l		unf_sub			# calc default unfl result
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+
+	fmov.s		&0x00000000,%fp1	# return EXOP in fp1
+	rts
+
+# t_unfl2 ALWAYS tells unf_sub to create a positive result
+	global		t_unfl2
+t_unfl2:
+	ori.l		&unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
+
+	sf.b		%d1			# set d1 to represent positive
+	bsr.l		unf_sub			# calc default unfl result
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+
+	fmov.s		&0x00000000,%fp1	# return EXOP in fp1
+	rts
+
+#################################################################
+# OVFL exception:						#
+#	- This routine is for cases where even an EXOP isn't	#
+#	  large enough to hold the range of this result.	#
+#	- Return the default result to the proper precision	#
+#	  with the sign of this result being the same as that	#
+#	  of the src operand.					#
+#	- t_ovfl2() is provided to force the result sign to	#
+#	  positive which is the desired result for fcosh().	#
+#	- t_ovfl_sc() is provided for scale() which only sets	#
+#	  the inexact bits if the number is inexact for the	#
+#	  precision indicated.					#
+#################################################################
+
+	global		t_ovfl_sc
+t_ovfl_sc:
+	ori.l		&ovfl_inx_mask,USER_FPSR(%a6) # set OVFL/AOVFL/AINEX
+
+	mov.b		%d0,%d1			# fetch rnd mode/prec
+	andi.b		&0xc0,%d1		# extract rnd prec
+	beq.b		ovfl_work		# prec is extended
+
+	tst.b		LOCAL_HI(%a0)		# is dst a DENORM?
+	bmi.b		ovfl_sc_norm		# no
+
+# dst op is a DENORM. we have to normalize the mantissa to see if the
+# result would be inexact for the given precision. make a copy of the
+# dst so we don't screw up the version passed to us.
+	mov.w		LOCAL_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		LOCAL_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		LOCAL_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a0	# pass ptr to FP_SCR0
+	movm.l		&0xc080,-(%sp)		# save d0-d1/a0
+	bsr.l		norm			# normalize mantissa
+	movm.l		(%sp)+,&0x0103		# restore d0-d1/a0
+
+ovfl_sc_norm:
+	cmpi.b		%d1,&0x40		# is prec sgl?
+	bne.b		ovfl_sc_dbl		# no; dbl
+ovfl_sc_sgl:
+	tst.l		LOCAL_LO(%a0)		# is lo lw of sgl set?
+	bne.b		ovfl_sc_inx		# yes
+	tst.b		3+LOCAL_HI(%a0)		# is lo byte of hi lw set?
+	bne.b		ovfl_sc_inx		# yes
+	bra.b		ovfl_work		# don't set INEX2
+ovfl_sc_dbl:
+	mov.l		LOCAL_LO(%a0),%d1	# are any of lo 11 bits of
+	andi.l		&0x7ff,%d1		# dbl mantissa set?
+	beq.b		ovfl_work		# no; don't set INEX2
+ovfl_sc_inx:
+	ori.l		&inex2_mask,USER_FPSR(%a6) # set INEX2
+	bra.b		ovfl_work		# continue
+
+	global		t_ovfl
+t_ovfl:
+	ori.l		&ovfinx_mask,USER_FPSR(%a6) # set OVFL/INEX2/AOVFL/AINEX
+
+ovfl_work:
+	tst.b		LOCAL_EX(%a0)		# what is the sign?
+	smi.b		%d1			# set d1 accordingly
+	bsr.l		ovf_res			# calc default ovfl result
+	mov.b		%d0,FPSR_CC(%a6)	# insert new ccodes
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+
+	fmov.s		&0x00000000,%fp1	# return EXOP in fp1
+	rts
+
+# t_ovfl2 ALWAYS tells ovf_res to create a positive result
+	global		t_ovfl2
+t_ovfl2:
+	ori.l		&ovfinx_mask,USER_FPSR(%a6) # set OVFL/INEX2/AOVFL/AINEX
+
+	sf.b		%d1			# clear sign flag for positive
+	bsr.l		ovf_res			# calc default ovfl result
+	mov.b		%d0,FPSR_CC(%a6)	# insert new ccodes
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+
+	fmov.s		&0x00000000,%fp1	# return EXOP in fp1
+	rts
+
+#################################################################
+# t_catch():							#
+#	- the last operation of a transcendental emulation	#
+#	  routine may have caused an underflow or overflow.	#
+#	  we find out if this occurred by doing an fsave and	#
+#	  checking the exception bit. if one did occur, then we	#
+#	  jump to fgen_except() which creates the default	#
+#	  result and EXOP for us.				#
+#################################################################
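+
+# Roughly, in C-like pseudocode (illustrative; the frame layout and
+# helper names are assumptions, not part of the package):
+#
+#	frame = fsave();
+#	if (frame.status < 0) {         /* exception pending?          */
+#	    fpsr |= INEX2 | AINEX;      /* emulation may not set these */
+#	    fgen_except(&frame);        /* default result + EXOP       */
+#	} else {
+#	    discard(frame);             /* pop the 12-byte frame       */
+#	    t_inx2();                   /* fall through: set INEX bits */
+#	}
+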
+	global		t_catch
+t_catch:
+
+	fsave		-(%sp)
+	tst.b		0x2(%sp)
+	bmi.b		catch
+	add.l		&0xc,%sp
+
+#################################################################
+# INEX2 exception:						#
+#	- The inex2 and ainex bits are set.			#
+#################################################################
+	global		t_inx2
+t_inx2:
+	fblt.w		t_minx2
+	fbeq.w		inx2_zero
+
+	global		t_pinx2
+t_pinx2:
+	ori.w		&inx2a_mask,2+USER_FPSR(%a6) # set INEX2/AINEX
+	rts
+
+	global		t_minx2
+t_minx2:
+	ori.l		&inx2a_mask+neg_mask,USER_FPSR(%a6) # set N/INEX2/AINEX
+	rts
+
+inx2_zero:
+	mov.b		&z_bmask,FPSR_CC(%a6)
+	ori.w		&inx2a_mask,2+USER_FPSR(%a6) # set INEX2/AINEX
+	rts
+
+# an underflow or overflow exception occurred.
+# we must set INEX/AINEX since the fmul/fdiv/fmov emulation may not!
+catch:
+	ori.w		&inx2a_mask,FPSR_EXCEPT(%a6)
+catch2:
+	bsr.l		fgen_except
+	add.l		&0xc,%sp
+	rts
+
+	global		t_catch2
+t_catch2:
+
+	fsave		-(%sp)
+
+	tst.b		0x2(%sp)
+	bmi.b		catch2
+	add.l		&0xc,%sp
+
+	fmov.l		%fpsr,%d0
+	or.l		%d0,USER_FPSR(%a6)
+
+	rts
+
+#########################################################################
+
+#########################################################################
+# unf_sub(): underflow default result calculation for transcendentals	#
+#									#
+# INPUT:								#
+#	d0   : rnd mode,precision					#
+#	d1.b : sign bit of result (0xff = (-) ; 0x00 = (+))		#
+# OUTPUT:								#
+#	a0   : points to result (in instruction memory)			#
+#########################################################################
+unf_sub:
+	ori.l		&unfinx_mask,USER_FPSR(%a6)
+
+	andi.w		&0x10,%d1		# keep sign bit in 4th spot
+
+	lsr.b		&0x4,%d0		# shift rnd prec,mode to lo bits
+	andi.b		&0xf,%d0		# strip hi rnd mode bit
+	or.b		%d1,%d0			# concat {sgn,mode,prec}
+
+	mov.l		%d0,%d1			# make a copy
+	lsl.b		&0x1,%d1		# mult index 2 by 2
+
+	mov.b		(tbl_unf_cc.b,%pc,%d0.w*1),FPSR_CC(%a6) # insert ccode bits
+	lea		(tbl_unf_result.b,%pc,%d1.w*8),%a0 # grab result ptr
+	rts
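+
+# The indexing above amounts to this C sketch (illustrative only):
+#
+#	idx = (sign << 4) | (prec << 2) | mode; /* 5-bit table index   */
+#	fpsr |= UNFL | INEX2 | AUNFL | AINEX;
+#	FPSR_CC = tbl_unf_cc[idx];              /* N and/or Z ccodes   */
+#	result  = &tbl_unf_result[idx];         /* 16-byte entries     */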
+
+tbl_unf_cc:
+	byte		0x4, 0x4, 0x4, 0x0
+	byte		0x4, 0x4, 0x4, 0x0
+	byte		0x4, 0x4, 0x4, 0x0
+	byte		0x0, 0x0, 0x0, 0x0
+	byte		0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
+	byte		0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
+	byte		0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
+
+tbl_unf_result:
+	long		0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+	long		0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+	long		0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+	long		0x00000000, 0x00000000, 0x00000001, 0x0 # MIN; ext
+
+	long		0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+	long		0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+	long		0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+	long		0x3f810000, 0x00000100, 0x00000000, 0x0 # MIN; sgl
+
+	long		0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+	long		0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+	long		0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+	long		0x3c010000, 0x00000000, 0x00000800, 0x0 # MIN; dbl
+
+	long		0x0,0x0,0x0,0x0
+	long		0x0,0x0,0x0,0x0
+	long		0x0,0x0,0x0,0x0
+	long		0x0,0x0,0x0,0x0
+
+	long		0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+	long		0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+	long		0x80000000, 0x00000000, 0x00000001, 0x0 # MIN; ext
+	long		0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+
+	long		0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+	long		0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+	long		0xbf810000, 0x00000100, 0x00000000, 0x0 # MIN; sgl
+	long		0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+
+	long		0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+	long		0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+	long		0xbc010000, 0x00000000, 0x00000800, 0x0 # MIN; dbl
+	long		0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+
+############################################################
+
+#########################################################################
+# src_zero(): Return signed zero according to sign of src operand.	#
+#########################################################################
+	global		src_zero
+src_zero:
+	tst.b		SRC_EX(%a0)		# get sign of src operand
+	bmi.b		ld_mzero		# if neg, load neg zero
+
+#
+# ld_pzero(): return a positive zero.
+#
+	global		ld_pzero
+ld_pzero:
+	fmov.s		&0x00000000,%fp0	# load +0
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+
+# ld_mzero(): return a negative zero.
+	global		ld_mzero
+ld_mzero:
+	fmov.s		&0x80000000,%fp0	# load -0
+	mov.b		&neg_bmask+z_bmask,FPSR_CC(%a6) # set 'N','Z' ccode bits
+	rts
+
+#########################################################################
+# dst_zero(): Return signed zero according to sign of dst operand.	#
+#########################################################################
+	global		dst_zero
+dst_zero:
+	tst.b		DST_EX(%a1)		# get sign of dst operand
+	bmi.b		ld_mzero		# if neg, load neg zero
+	bra.b		ld_pzero		# load positive zero
+
+#########################################################################
+# src_inf(): Return signed inf according to sign of src operand.	#
+#########################################################################
+	global		src_inf
+src_inf:
+	tst.b		SRC_EX(%a0)		# get sign of src operand
+	bmi.b		ld_minf			# if negative branch
+
+#
+# ld_pinf(): return a positive infinity.
+#
+	global		ld_pinf
+ld_pinf:
+	fmov.s		&0x7f800000,%fp0	# load +INF
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'INF' ccode bit
+	rts
+
+#
+# ld_minf():return a negative infinity.
+#
+	global		ld_minf
+ld_minf:
+	fmov.s		&0xff800000,%fp0	# load -INF
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+	rts
+
+#########################################################################
+# dst_inf(): Return signed inf according to sign of dst operand.	#
+#########################################################################
+	global		dst_inf
+dst_inf:
+	tst.b		DST_EX(%a1)		# get sign of dst operand
+	bmi.b		ld_minf			# if negative branch
+	bra.b		ld_pinf
+
+#################################################################
+# szr_inf(): Return +ZERO for a negative src operand or	#
+#	     +INF for a positive src operand.			#
+#	     Routine used for fetox, ftwotox, and ftentox.	#
+#################################################################
+	global		szr_inf
+szr_inf:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_pzero
+	bra.b		ld_pinf
+
+#########################################################################
+# sopr_inf(): Return +INF for a positive src operand or			#
+#	      jump to operand error routine for a negative src operand.	#
+#	      Routine used for flogn, flognp1, flog10, and flog2.	#
+#########################################################################
+	global		sopr_inf
+sopr_inf:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.w		t_operr
+	bra.b		ld_pinf
+
+#################################################################
+# setoxm1i(): Return minus one for a negative src operand or	#
+#	      positive infinity for a positive src operand.	#
+#	      Routine used for fetoxm1.				#
+#################################################################
+	global		setoxm1i
+setoxm1i:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_mone
+	bra.b		ld_pinf
+
+#########################################################################
+# src_one(): Return signed one according to sign of src operand.	#
+#########################################################################
+	global		src_one
+src_one:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_mone
+
+#
+# ld_pone(): return positive one.
+#
+	global		ld_pone
+ld_pone:
+	fmov.s		&0x3f800000,%fp0	# load +1
+	clr.b		FPSR_CC(%a6)
+	rts
+
+#
+# ld_mone(): return negative one.
+#
+	global		ld_mone
+ld_mone:
+	fmov.s		&0xbf800000,%fp0	# load -1
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+ppiby2:	long		0x3fff0000, 0xc90fdaa2, 0x2168c235
+mpiby2:	long		0xbfff0000, 0xc90fdaa2, 0x2168c235
+
+#################################################################
+# spi_2(): Return signed PI/2 according to sign of src operand.	#
+#################################################################
+	global		spi_2
+spi_2:
+	tst.b		SRC_EX(%a0)		# check sign of source
+	bmi.b		ld_mpi2
+
+#
+# ld_ppi2(): return positive PI/2.
+#
+	global		ld_ppi2
+ld_ppi2:
+	fmov.l		%d0,%fpcr
+	fmov.x		ppiby2(%pc),%fp0	# load +pi/2
+	bra.w		t_pinx2			# set INEX2
+
+#
+# ld_mpi2(): return negative PI/2.
+#
+	global		ld_mpi2
+ld_mpi2:
+	fmov.l		%d0,%fpcr
+	fmov.x		mpiby2(%pc),%fp0	# load -pi/2
+	bra.w		t_minx2			# set INEX2
+
+####################################################
+# The following routines give support for fsincos. #
+####################################################
+
+#
+# ssincosz(): When the src operand is ZERO, store a one in the
+#	      cosine register and return a ZERO in fp0 w/ the same sign
+#	      as the src operand.
+#
+	global		ssincosz
+ssincosz:
+	fmov.s		&0x3f800000,%fp1
+	tst.b		SRC_EX(%a0)		# test sign
+	bpl.b		sincoszp
+	fmov.s		&0x80000000,%fp0	# return sin result in fp0
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)
+	bra.b		sto_cos			# store cosine result
+sincoszp:
+	fmov.s		&0x00000000,%fp0	# return sin result in fp0
+	mov.b		&z_bmask,FPSR_CC(%a6)
+	bra.b		sto_cos			# store cosine result
+
+#
+# ssincosi(): When the src operand is INF, store a QNAN in the cosine
+#	      register and jump to the operand error routine.
+#
+	global		ssincosi
+ssincosi:
+	fmov.x		qnan(%pc),%fp1		# load NAN
+	bsr.l		sto_cos			# store cosine result
+	bra.w		t_operr
+
+#
+# ssincosqnan(): When the src operand is a QNAN, store the QNAN in the cosine
+#		 register and branch to the src QNAN routine.
+#
+	global		ssincosqnan
+ssincosqnan:
+	fmov.x		LOCAL_EX(%a0),%fp1
+	bsr.l		sto_cos
+	bra.w		src_qnan
+
+#
+# ssincossnan(): When the src operand is an SNAN, store the SNAN w/ the SNAN bit set
+#		 in the cosine register and branch to the src SNAN routine.
+#
+	global		ssincossnan
+ssincossnan:
+	fmov.x		LOCAL_EX(%a0),%fp1
+	bsr.l		sto_cos
+	bra.w		src_snan
+
+########################################################################
+
+#########################################################################
+# sto_cos(): store fp1 to the fpreg designated by the CMDREG dst field.	#
+#	     fp1 holds the result of the cosine portion of ssincos().	#
+#	     the value in fp1 will not take any exceptions when moved.	#
+# INPUT:								#
+#	fp1 : fp value to store						#
+# MODIFIED:								#
+#	d0								#
+#########################################################################
+	global		sto_cos
+sto_cos:
+	mov.b		1+EXC_CMDREG(%a6),%d0
+	andi.w		&0x7,%d0
+	mov.w		(tbl_sto_cos.b,%pc,%d0.w*2),%d0
+	jmp		(tbl_sto_cos.b,%pc,%d0.w*1)
+
+tbl_sto_cos:
+	short		sto_cos_0 - tbl_sto_cos
+	short		sto_cos_1 - tbl_sto_cos
+	short		sto_cos_2 - tbl_sto_cos
+	short		sto_cos_3 - tbl_sto_cos
+	short		sto_cos_4 - tbl_sto_cos
+	short		sto_cos_5 - tbl_sto_cos
+	short		sto_cos_6 - tbl_sto_cos
+	short		sto_cos_7 - tbl_sto_cos
+
+sto_cos_0:
+	fmovm.x		&0x40,EXC_FP0(%a6)
+	rts
+sto_cos_1:
+	fmovm.x		&0x40,EXC_FP1(%a6)
+	rts
+sto_cos_2:
+	fmov.x		%fp1,%fp2
+	rts
+sto_cos_3:
+	fmov.x		%fp1,%fp3
+	rts
+sto_cos_4:
+	fmov.x		%fp1,%fp4
+	rts
+sto_cos_5:
+	fmov.x		%fp1,%fp5
+	rts
+sto_cos_6:
+	fmov.x		%fp1,%fp6
+	rts
+sto_cos_7:
+	fmov.x		%fp1,%fp7
+	rts
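+
+# Equivalent C sketch of the dispatch above (illustrative only):
+#
+#	switch (cmdreg & 0x7) {         /* cosine dst register field   */
+#	case 0: exc_fp0 = fp1; break;   /* fp0/fp1 live in the frame   */
+#	case 1: exc_fp1 = fp1; break;
+#	default: fpN = fp1; break;      /* fp2-fp7 are live in hw      */
+#	}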
+
+##################################################################
+	global		smod_sdnrm
+	global		smod_snorm
+smod_sdnrm:
+smod_snorm:
+	mov.b		DTAG(%a6),%d1
+	beq.l		smod
+	cmpi.b		%d1,&ZERO
+	beq.w		smod_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		smod
+	cmpi.b		%d1,&SNAN
+	beq.l		dst_snan
+	bra.l		dst_qnan
+
+	global		smod_szero
+smod_szero:
+	mov.b		DTAG(%a6),%d1
+	beq.l		t_operr
+	cmpi.b		%d1,&ZERO
+	beq.l		t_operr
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		t_operr
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+	global		smod_sinf
+smod_sinf:
+	mov.b		DTAG(%a6),%d1
+	beq.l		smod_fpn
+	cmpi.b		%d1,&ZERO
+	beq.l		smod_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		smod_fpn
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+smod_zro:
+srem_zro:
+	mov.b		SRC_EX(%a0),%d1		# get src sign
+	mov.b		DST_EX(%a1),%d0		# get dst sign
+	eor.b		%d0,%d1			# get qbyte sign
+	andi.b		&0x80,%d1
+	mov.b		%d1,FPSR_QBYTE(%a6)
+	tst.b		%d0
+	bpl.w		ld_pzero
+	bra.w		ld_mzero
+
+smod_fpn:
+srem_fpn:
+	clr.b		FPSR_QBYTE(%a6)
+	mov.l		%d0,-(%sp)
+	mov.b		SRC_EX(%a0),%d1		# get src sign
+	mov.b		DST_EX(%a1),%d0		# get dst sign
+	eor.b		%d0,%d1			# get qbyte sign
+	andi.b		&0x80,%d1
+	mov.b		%d1,FPSR_QBYTE(%a6)
+	cmpi.b		DTAG(%a6),&DENORM
+	bne.b		smod_nrm
+	lea		DST(%a1),%a0
+	mov.l		(%sp)+,%d0
+	bra		t_resdnrm
+smod_nrm:
+	fmov.l		(%sp)+,%fpcr
+	fmov.x		DST(%a1),%fp0
+	tst.b		DST_EX(%a1)
+	bmi.b		smod_nrm_neg
+	rts
+
+smod_nrm_neg:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode
+	rts
+
+#########################################################################
+	global		srem_snorm
+	global		srem_sdnrm
+srem_sdnrm:
+srem_snorm:
+	mov.b		DTAG(%a6),%d1
+	beq.l		srem
+	cmpi.b		%d1,&ZERO
+	beq.w		srem_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		srem
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+	global		srem_szero
+srem_szero:
+	mov.b		DTAG(%a6),%d1
+	beq.l		t_operr
+	cmpi.b		%d1,&ZERO
+	beq.l		t_operr
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		t_operr
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+	global		srem_sinf
+srem_sinf:
+	mov.b		DTAG(%a6),%d1
+	beq.w		srem_fpn
+	cmpi.b		%d1,&ZERO
+	beq.w		srem_zro
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		srem_fpn
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+#########################################################################
+	global		sscale_snorm
+	global		sscale_sdnrm
+sscale_snorm:
+sscale_sdnrm:
+	mov.b		DTAG(%a6),%d1
+	beq.l		sscale
+	cmpi.b		%d1,&ZERO
+	beq.l		dst_zero
+	cmpi.b		%d1,&INF
+	beq.l		dst_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		sscale
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+	global		sscale_szero
+sscale_szero:
+	mov.b		DTAG(%a6),%d1
+	beq.l		sscale
+	cmpi.b		%d1,&ZERO
+	beq.l		dst_zero
+	cmpi.b		%d1,&INF
+	beq.l		dst_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		sscale
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	bra.l		dst_snan
+
+	global		sscale_sinf
+sscale_sinf:
+	mov.b		DTAG(%a6),%d1
+	beq.l		t_operr
+	cmpi.b		%d1,&QNAN
+	beq.l		dst_qnan
+	cmpi.b		%d1,&SNAN
+	beq.l		dst_snan
+	bra.l		t_operr
+
+########################################################################
+
+#
+# sop_sqnan(): The src op for frem/fmod/fscale was a QNAN.
+#
+	global		sop_sqnan
+sop_sqnan:
+	mov.b		DTAG(%a6),%d1
+	cmpi.b		%d1,&QNAN
+	beq.b		dst_qnan
+	cmpi.b		%d1,&SNAN
+	beq.b		dst_snan
+	bra.b		src_qnan
+
+#
+# sop_ssnan(): The src op for frem/fmod/fscale was an SNAN.
+#
+	global		sop_ssnan
+sop_ssnan:
+	mov.b		DTAG(%a6),%d1
+	cmpi.b		%d1,&QNAN
+	beq.b		dst_qnan_src_snan
+	cmpi.b		%d1,&SNAN
+	beq.b		dst_snan
+	bra.b		src_snan
+
+dst_qnan_src_snan:
+	ori.l		&snaniop_mask,USER_FPSR(%a6) # set NAN/SNAN/AIOP
+	bra.b		dst_qnan
+
+#
+# dst_snan(): Return the dst SNAN w/ the SNAN bit set.
+#
+	global		dst_snan
+dst_snan:
+	fmov.x		DST(%a1),%fp0		# the fmove sets the SNAN bit
+	fmov.l		%fpsr,%d0		# catch resulting status
+	or.l		%d0,USER_FPSR(%a6)	# store status
+	rts
+
+#
+# dst_qnan(): Return the dst QNAN.
+#
+	global		dst_qnan
+dst_qnan:
+	fmov.x		DST(%a1),%fp0		# return the non-signalling nan
+	tst.b		DST_EX(%a1)		# set ccodes according to QNAN sign
+	bmi.b		dst_qnan_m
+dst_qnan_p:
+	mov.b		&nan_bmask,FPSR_CC(%a6)
+	rts
+dst_qnan_m:
+	mov.b		&neg_bmask+nan_bmask,FPSR_CC(%a6)
+	rts
+
+#
+# src_snan(): Return the src SNAN w/ the SNAN bit set.
+#
+	global		src_snan
+src_snan:
+	fmov.x		SRC(%a0),%fp0		# the fmove sets the SNAN bit
+	fmov.l		%fpsr,%d0		# catch resulting status
+	or.l		%d0,USER_FPSR(%a6)	# store status
+	rts
+
+#
+# src_qnan(): Return the src QNAN.
+#
+	global		src_qnan
+src_qnan:
+	fmov.x		SRC(%a0),%fp0		# return the non-signalling nan
+	tst.b		SRC_EX(%a0)		# set ccodes according to QNAN sign
+	bmi.b		src_qnan_m
+src_qnan_p:
+	mov.b		&nan_bmask,FPSR_CC(%a6)
+	rts
+src_qnan_m:
+	mov.b		&neg_bmask+nan_bmask,FPSR_CC(%a6)
+	rts
+
+#
+# fkern2.s:
+#	These entry points are used by the exception handler
+# routines, where an instruction is selected by an index into
+# a large jump table corresponding to the instruction that
+# has been decoded. Flow continues here, where we now decode
+# further according to the source operand type.
+#
+
+	global		fsinh
+fsinh:
+	mov.b		STAG(%a6),%d1
+	beq.l		ssinh
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		src_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		ssinhd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
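+
+# Every entry point in this section follows the same pattern; for
+# fsinh it is equivalent to this C sketch (illustrative only):
+#
+#	switch (STAG) {                 /* class of the src operand    */
+#	case NORM:   return ssinh();    /* the real emulation          */
+#	case ZERO:   return src_zero(); /* sinh(+-0)   = +-0           */
+#	case INF:    return src_inf();  /* sinh(+-inf) = +-inf         */
+#	case DENORM: return ssinhd();
+#	case QNAN:   return src_qnan();
+#	default:     return src_snan();
+#	}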
+
+	global		flognp1
+flognp1:
+	mov.b		STAG(%a6),%d1
+	beq.l		slognp1
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		sopr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		slognp1d
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fetoxm1
+fetoxm1:
+	mov.b		STAG(%a6),%d1
+	beq.l		setoxm1
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		setoxm1i
+	cmpi.b		%d1,&DENORM
+	beq.l		setoxm1d
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		ftanh
+ftanh:
+	mov.b		STAG(%a6),%d1
+	beq.l		stanh
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		src_one
+	cmpi.b		%d1,&DENORM
+	beq.l		stanhd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fatan
+fatan:
+	mov.b		STAG(%a6),%d1
+	beq.l		satan
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		spi_2
+	cmpi.b		%d1,&DENORM
+	beq.l		satand
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fasin
+fasin:
+	mov.b		STAG(%a6),%d1
+	beq.l		sasin
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		sasind
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fatanh
+fatanh:
+	mov.b		STAG(%a6),%d1
+	beq.l		satanh
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		satanhd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fsine
+fsine:
+	mov.b		STAG(%a6),%d1
+	beq.l		ssin
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		ssind
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		ftan
+ftan:
+	mov.b		STAG(%a6),%d1
+	beq.l		stan
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		stand
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fetox
+fetox:
+	mov.b		STAG(%a6),%d1
+	beq.l		setox
+	cmpi.b		%d1,&ZERO
+	beq.l		ld_pone
+	cmpi.b		%d1,&INF
+	beq.l		szr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		setoxd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		ftwotox
+ftwotox:
+	mov.b		STAG(%a6),%d1
+	beq.l		stwotox
+	cmpi.b		%d1,&ZERO
+	beq.l		ld_pone
+	cmpi.b		%d1,&INF
+	beq.l		szr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		stwotoxd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		ftentox
+ftentox:
+	mov.b		STAG(%a6),%d1
+	beq.l		stentox
+	cmpi.b		%d1,&ZERO
+	beq.l		ld_pone
+	cmpi.b		%d1,&INF
+	beq.l		szr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		stentoxd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		flogn
+flogn:
+	mov.b		STAG(%a6),%d1
+	beq.l		slogn
+	cmpi.b		%d1,&ZERO
+	beq.l		t_dz2
+	cmpi.b		%d1,&INF
+	beq.l		sopr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		slognd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		flog10
+flog10:
+	mov.b		STAG(%a6),%d1
+	beq.l		slog10
+	cmpi.b		%d1,&ZERO
+	beq.l		t_dz2
+	cmpi.b		%d1,&INF
+	beq.l		sopr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		slog10d
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		flog2
+flog2:
+	mov.b		STAG(%a6),%d1
+	beq.l		slog2
+	cmpi.b		%d1,&ZERO
+	beq.l		t_dz2
+	cmpi.b		%d1,&INF
+	beq.l		sopr_inf
+	cmpi.b		%d1,&DENORM
+	beq.l		slog2d
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fcosh
+fcosh:
+	mov.b		STAG(%a6),%d1
+	beq.l		scosh
+	cmpi.b		%d1,&ZERO
+	beq.l		ld_pone
+	cmpi.b		%d1,&INF
+	beq.l		ld_pinf
+	cmpi.b		%d1,&DENORM
+	beq.l		scoshd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		facos
+facos:
+	mov.b		STAG(%a6),%d1
+	beq.l		sacos
+	cmpi.b		%d1,&ZERO
+	beq.l		ld_ppi2
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		sacosd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fcos
+fcos:
+	mov.b		STAG(%a6),%d1
+	beq.l		scos
+	cmpi.b		%d1,&ZERO
+	beq.l		ld_pone
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		scosd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fgetexp
+fgetexp:
+	mov.b		STAG(%a6),%d1
+	beq.l		sgetexp
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		sgetexpd
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fgetman
+fgetman:
+	mov.b		STAG(%a6),%d1
+	beq.l		sgetman
+	cmpi.b		%d1,&ZERO
+	beq.l		src_zero
+	cmpi.b		%d1,&INF
+	beq.l		t_operr
+	cmpi.b		%d1,&DENORM
+	beq.l		sgetmand
+	cmpi.b		%d1,&QNAN
+	beq.l		src_qnan
+	bra.l		src_snan
+
+	global		fsincos
+fsincos:
+	mov.b		STAG(%a6),%d1
+	beq.l		ssincos
+	cmpi.b		%d1,&ZERO
+	beq.l		ssincosz
+	cmpi.b		%d1,&INF
+	beq.l		ssincosi
+	cmpi.b		%d1,&DENORM
+	beq.l		ssincosd
+	cmpi.b		%d1,&QNAN
+	beq.l		ssincosqnan
+	bra.l		ssincossnan
+
+	global		fmod
+fmod:
+	mov.b		STAG(%a6),%d1
+	beq.l		smod_snorm
+	cmpi.b		%d1,&ZERO
+	beq.l		smod_szero
+	cmpi.b		%d1,&INF
+	beq.l		smod_sinf
+	cmpi.b		%d1,&DENORM
+	beq.l		smod_sdnrm
+	cmpi.b		%d1,&QNAN
+	beq.l		sop_sqnan
+	bra.l		sop_ssnan
+
+	global		frem
+frem:
+	mov.b		STAG(%a6),%d1
+	beq.l		srem_snorm
+	cmpi.b		%d1,&ZERO
+	beq.l		srem_szero
+	cmpi.b		%d1,&INF
+	beq.l		srem_sinf
+	cmpi.b		%d1,&DENORM
+	beq.l		srem_sdnrm
+	cmpi.b		%d1,&QNAN
+	beq.l		sop_sqnan
+	bra.l		sop_ssnan
+
+	global		fscale
+fscale:
+	mov.b		STAG(%a6),%d1
+	beq.l		sscale_snorm
+	cmpi.b		%d1,&ZERO
+	beq.l		sscale_szero
+	cmpi.b		%d1,&INF
+	beq.l		sscale_sinf
+	cmpi.b		%d1,&DENORM
+	beq.l		sscale_sdnrm
+	cmpi.b		%d1,&QNAN
+	beq.l		sop_sqnan
+	bra.l		sop_ssnan
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fgen_except(): catch an exception during transcendental		#
+#		       emulation					#
+#									#
+# XREF ****************************************************************	#
+#	fmul() - emulate a multiply instruction				#
+#	fadd() - emulate an add instruction				#
+#	fin() - emulate an fmove instruction				#
+#									#
+# INPUT ***************************************************************	#
+#	fp0 = destination operand					#
+#	d1  = type of instruction that took exception			#
+#	fsave frame = source operand					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	An exception occurred on the last instruction of the		#
+# transcendental emulation. hopefully, this won't be happening much	#
+# because it will be VERY slow.						#
+#	The only exceptions capable of passing through here are		#
+# Overflow, Underflow, and Unsupported Data Type.			#
+#									#
+#########################################################################
+
+	global		fgen_except
+fgen_except:
+	cmpi.b		0x3(%sp),&0x7		# is exception UNSUPP?
+	beq.b		fge_unsupp		# yes
+
+	mov.b		&NORM,STAG(%a6)
+
+fge_cont:
+	mov.b		&NORM,DTAG(%a6)
+
+# ok, I have a problem with putting the dst op at FP_DST. the emulation
+# routines aren't supposed to alter the operands but we've just squashed
+# FP_DST here...
+
+# 8/17/93 - this turns out to be more of a "cleanliness" issue
+# than a potential bug. to begin with, only the dyadic functions
+# frem, fmod, and fscale would get the dst trashed here. But, for
+# the 060SP, the FP_DST is never used again anyway.
+	fmovm.x		&0x80,FP_DST(%a6)	# dst op is in fp0
+
+	lea		0x4(%sp),%a0		# pass: ptr to src op
+	lea		FP_DST(%a6),%a1		# pass: ptr to dst op
+
+	cmpi.b		%d1,&FMOV_OP
+	beq.b		fge_fin			# it was an "fmov"
+	cmpi.b		%d1,&FADD_OP
+	beq.b		fge_fadd		# it was an "fadd"
+fge_fmul:
+	bsr.l		fmul
+	rts
+fge_fadd:
+	bsr.l		fadd
+	rts
+fge_fin:
+	bsr.l		fin
+	rts
+
+fge_unsupp:
+	mov.b		&DENORM,STAG(%a6)
+	bra.b		fge_cont
+
+#
+# This table holds the offsets of the emulation routines for each individual
+# math operation relative to the address of this table. Included are
+# routines like fadd/fmul/fabs as well as the transcendentals.
+# The location within the table is determined by the extension bits of the
+# operation longword.
+#
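+
+# A handler is reached roughly as in this C sketch (illustrative only):
+#
+#	handler = (void (*)(void))
+#	          ((char *)tbl_unsupp + tbl_unsupp[ext_bits]);
+#	handler();      /* offsets keep the table position-independent */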
+
+	swbeg		&109
+tbl_unsupp:
+	long		fin		- tbl_unsupp	# 00: fmove
+	long		fint		- tbl_unsupp	# 01: fint
+	long		fsinh		- tbl_unsupp	# 02: fsinh
+	long		fintrz		- tbl_unsupp	# 03: fintrz
+	long		fsqrt		- tbl_unsupp	# 04: fsqrt
+	long		tbl_unsupp	- tbl_unsupp
+	long		flognp1		- tbl_unsupp	# 06: flognp1
+	long		tbl_unsupp	- tbl_unsupp
+	long		fetoxm1		- tbl_unsupp	# 08: fetoxm1
+	long		ftanh		- tbl_unsupp	# 09: ftanh
+	long		fatan		- tbl_unsupp	# 0a: fatan
+	long		tbl_unsupp	- tbl_unsupp
+	long		fasin		- tbl_unsupp	# 0c: fasin
+	long		fatanh		- tbl_unsupp	# 0d: fatanh
+	long		fsine		- tbl_unsupp	# 0e: fsin
+	long		ftan		- tbl_unsupp	# 0f: ftan
+	long		fetox		- tbl_unsupp	# 10: fetox
+	long		ftwotox		- tbl_unsupp	# 11: ftwotox
+	long		ftentox		- tbl_unsupp	# 12: ftentox
+	long		tbl_unsupp	- tbl_unsupp
+	long		flogn		- tbl_unsupp	# 14: flogn
+	long		flog10		- tbl_unsupp	# 15: flog10
+	long		flog2		- tbl_unsupp	# 16: flog2
+	long		tbl_unsupp	- tbl_unsupp
+	long		fabs		- tbl_unsupp	# 18: fabs
+	long		fcosh		- tbl_unsupp	# 19: fcosh
+	long		fneg		- tbl_unsupp	# 1a: fneg
+	long		tbl_unsupp	- tbl_unsupp
+	long		facos		- tbl_unsupp	# 1c: facos
+	long		fcos		- tbl_unsupp	# 1d: fcos
+	long		fgetexp		- tbl_unsupp	# 1e: fgetexp
+	long		fgetman		- tbl_unsupp	# 1f: fgetman
+	long		fdiv		- tbl_unsupp	# 20: fdiv
+	long		fmod		- tbl_unsupp	# 21: fmod
+	long		fadd		- tbl_unsupp	# 22: fadd
+	long		fmul		- tbl_unsupp	# 23: fmul
+	long		fsgldiv		- tbl_unsupp	# 24: fsgldiv
+	long		frem		- tbl_unsupp	# 25: frem
+	long		fscale		- tbl_unsupp	# 26: fscale
+	long		fsglmul		- tbl_unsupp	# 27: fsglmul
+	long		fsub		- tbl_unsupp	# 28: fsub
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsincos		- tbl_unsupp	# 30: fsincos
+	long		fsincos		- tbl_unsupp	# 31: fsincos
+	long		fsincos		- tbl_unsupp	# 32: fsincos
+	long		fsincos		- tbl_unsupp	# 33: fsincos
+	long		fsincos		- tbl_unsupp	# 34: fsincos
+	long		fsincos		- tbl_unsupp	# 35: fsincos
+	long		fsincos		- tbl_unsupp	# 36: fsincos
+	long		fsincos		- tbl_unsupp	# 37: fsincos
+	long		fcmp		- tbl_unsupp	# 38: fcmp
+	long		tbl_unsupp	- tbl_unsupp
+	long		ftst		- tbl_unsupp	# 3a: ftst
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsin		- tbl_unsupp	# 40: fsmove
+	long		fssqrt		- tbl_unsupp	# 41: fssqrt
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdin		- tbl_unsupp	# 44: fdmove
+	long		fdsqrt		- tbl_unsupp	# 45: fdsqrt
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsabs		- tbl_unsupp	# 58: fsabs
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsneg		- tbl_unsupp	# 5a: fsneg
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdabs		- tbl_unsupp	# 5c: fdabs
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdneg		- tbl_unsupp	# 5e: fdneg
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsdiv		- tbl_unsupp	# 60: fsdiv
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsadd		- tbl_unsupp	# 62: fsadd
+	long		fsmul		- tbl_unsupp	# 63: fsmul
+	long		fddiv		- tbl_unsupp	# 64: fddiv
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdadd		- tbl_unsupp	# 66: fdadd
+	long		fdmul		- tbl_unsupp	# 67: fdmul
+	long		fssub		- tbl_unsupp	# 68: fssub
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdsub		- tbl_unsupp	# 6c: fdsub
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fmul(): emulates the fmul instruction				#
+#	fsmul(): emulates the fsmul instruction				#
+#	fdmul(): emulates the fdmul instruction				#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res() - return default underflow result			#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a multiply	#
+# instruction won't cause an exception. Use the regular fmul to		#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
+
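+# A rough C sketch of the scale-factor test below (illustrative only;
+# the names and the enum are invented, and the constants shown are the
+# extended-precision table entries -- sgl/dbl use the other rows):
+#
+#	/* scale_to_zero_*() forces an operand's exponent to 0x3fff and
+#	   returns (0x3fff - old_exp); for fmul the two are summed. */
+#	enum res { OVFL, MAY_OVFL, UNFL, MAY_UNFL, NORMAL };
+#	enum res classify(long sf)
+#	{
+#		if (sf <  0x3fff - 0x7ffe) return OVFL;     /* exp > 0x7ffe */
+#		if (sf == 0x3fff - 0x7ffe) return MAY_OVFL; /* rnd decides  */
+#		if (sf >  0x3fff + 0x0001) return UNFL;     /* exp < 0x0000 */
+#		if (sf == 0x3fff + 0x0001) return MAY_UNFL;
+#		return NORMAL;
+#	}
+#
+# otherwise the scaled fmul result is simply unscaled on exit by
+# subtracting sf from its exponent.
+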
+	align		0x10
+tbl_fmul_ovfl:
+	long		0x3fff - 0x7ffe		# ext_max
+	long		0x3fff - 0x407e		# sgl_max
+	long		0x3fff - 0x43fe		# dbl_max
+tbl_fmul_unfl:
+	long		0x3fff + 0x0001		# ext_unfl
+	long		0x3fff - 0x3f80		# sgl_unfl
+	long		0x3fff - 0x3c00		# dbl_unfl
+
+	global		fsmul
+fsmul:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fmul
+
+	global		fdmul
+fdmul:
+	andi.b		&0x30,%d0
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fmul
+fmul:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+	bne.w		fmul_not_norm		# optimize on non-norm input
+
+fmul_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# scale src exponent
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# scale dst exponent
+
+	add.l		%d0,(%sp)		# SCALE_FACTOR = scale1 + scale2
+
+	mov.w		2+L_SCR3(%a6),%d1	# fetch precision
+	lsr.b		&0x6,%d1		# shift to lo bits
+	mov.l		(%sp)+,%d0		# load S.F.
+	cmp.l		%d0,(tbl_fmul_ovfl.w,%pc,%d1.w*4) # would result ovfl?
+	beq.w		fmul_may_ovfl		# result may rnd to overflow
+	blt.w		fmul_ovfl		# result will overflow
+
+	cmp.l		%d0,(tbl_fmul_unfl.w,%pc,%d1.w*4) # would result unfl?
+	beq.w		fmul_may_unfl		# result may or may not unfl
+	bgt.w		fmul_unfl		# result will underflow
+
+#
+# NORMAL:
+# - the result of the multiply operation will neither overflow nor underflow.
+# - do the multiply to the proper precision and rounding mode.
+# - scale the result exponent using the scale factor. if both operands were
+# normalized then we really don't need to go through this scaling. but for now,
+# this will do.
+#
+fmul_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fmul_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# OVERFLOW:
+# - the result of the multiply operation is an overflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+# - if overflow or inexact is enabled, we need a multiply result rounded to
+# extended precision. if the original operation was extended, then we have this
+# result. if the original operation was single or double, we have to do another
+# multiply using extended precision and the correct rounding mode. the result
+# of this operation then has its exponent scaled by -0x6000 to create the
+# exceptional operand.
+#
+fmul_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+# save setting this until now because this is where fmul_may_ovfl may jump in
+fmul_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fmul_ovfl_ena		# yes
+
+# calculate the default result
+fmul_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass rnd prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled; Create EXOP:
+# - if precision is extended, then we have the EXOP. simply bias the exponent
+# with an extra -0x6000. if the precision is single or double, we need to
+# calculate a result rounded to extended precision.
+#
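+# (for instance, an overflowed true exponent of 0x8321 is stored as
+# 0x8321 - 0x6000 = 0x2321 after the 15-bit mask; the exception handler
+# can then recover the true exponent by adding the 0x6000 bias back.)
+#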
+fmul_ovfl_ena:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# test the rnd prec
+	bne.b		fmul_ovfl_ena_sd	# it's sgl or dbl
+
+fmul_ovfl_ena_cont:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1		# clear sign bit
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fmul_ovfl_dis
+
+fmul_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# keep rnd mode only
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	bra.b		fmul_ovfl_ena_cont
+
+#
+# may OVERFLOW:
+# - the result of the multiply operation MAY overflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+#
+fmul_may_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fmul_ovfl_tst		# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fmul_normal_exit
+
+#
+# UNDERFLOW:
+# - the result of the multiply operation is an underflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+# - if overflow or inexact is enabled, we need a multiply result rounded to
+# extended precision. if the original operation was extended, then we have this
+# result. if the original operation was single or double, we have to do another
+# multiply using extended precision and the correct rounding mode. the result
+# of this operation then has its exponent scaled by -0x6000 to create the
+# exceptional operand.
+#
+fmul_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+# for fun, let's use only extended precision, round to zero. then, let
+# the unf_res() routine figure out all the rest.
+# this way, we will get the correct answer.
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fmul_unfl_ena		# yes
+
+fmul_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# unf_res may have set 'Z'
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fmul_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fmul_unfl_ena_sd	# no, sgl or dbl
+
+# if the rnd mode is anything but RZ, then we have to re-do the above
+# multiplication because we used RZ for all.
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fmul_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp1	# execute multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fmul_unfl_dis
+
+fmul_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# use only rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fmul_unfl_ena_cont
+
+# MAY UNDERFLOW:
+# -use the correct rounding mode and precision. this code favors operations
+# that do not underflow.
+fmul_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| > 2.b?
+	fbgt.w		fmul_normal_exit	# no; no underflow occurred
+	fblt.w		fmul_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 2. but,
+# we don't know if the result was an underflow that rounded up to a 2 or
+# a normalized number that rounded down to a 2. so, redo the entire operation
+# using RZ as the rounding mode to see what the pre-rounded result is.
+# this case should be relatively rare.
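+# (with RN, for example, a pre-rounded result just under 2.0 can round
+# up to exactly 2.0 and mask the underflow; RZ never rounds up in
+# magnitude, so it exposes the pre-rounded value.)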
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst operand
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert RZ
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp1	# execute multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x2		# is |result| < 2.b?
+	fbge.w		fmul_normal_exit	# no; no underflow occurred
+	bra.w		fmul_unfl		# yes, underflow occurred
+
+################################################################################
+
+#
+# Multiply: inputs are not both normalized; what are they?
+#
+fmul_not_norm:
+	mov.w		(tbl_fmul_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fmul_op.b,%pc,%d1.w)
+
+	swbeg		&48
+tbl_fmul_op:
+	short		fmul_norm	- tbl_fmul_op # NORM x NORM
+	short		fmul_zero	- tbl_fmul_op # NORM x ZERO
+	short		fmul_inf_src	- tbl_fmul_op # NORM x INF
+	short		fmul_res_qnan	- tbl_fmul_op # NORM x QNAN
+	short		fmul_norm	- tbl_fmul_op # NORM x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # NORM x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_zero	- tbl_fmul_op # ZERO x NORM
+	short		fmul_zero	- tbl_fmul_op # ZERO x ZERO
+	short		fmul_res_operr	- tbl_fmul_op # ZERO x INF
+	short		fmul_res_qnan	- tbl_fmul_op # ZERO x QNAN
+	short		fmul_zero	- tbl_fmul_op # ZERO x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # ZERO x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_inf_dst	- tbl_fmul_op # INF x NORM
+	short		fmul_res_operr	- tbl_fmul_op # INF x ZERO
+	short		fmul_inf_dst	- tbl_fmul_op # INF x INF
+	short		fmul_res_qnan	- tbl_fmul_op # INF x QNAN
+	short		fmul_inf_dst	- tbl_fmul_op # INF x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # INF x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x NORM
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x ZERO
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x INF
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x QNAN
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # QNAN x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_norm	- tbl_fmul_op # DENORM x NORM
+	short		fmul_zero	- tbl_fmul_op # DENORM x ZERO
+	short		fmul_inf_src	- tbl_fmul_op # DENORM x INF
+	short		fmul_res_qnan	- tbl_fmul_op # DENORM x QNAN
+	short		fmul_norm	- tbl_fmul_op # DENORM x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # DENORM x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x NORM
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x ZERO
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x INF
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x QNAN
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+fmul_res_operr:
+	bra.l		res_operr
+fmul_res_snan:
+	bra.l		res_snan
+fmul_res_qnan:
+	bra.l		res_qnan
+
+#
+# Multiply: (Zero x Zero) || (Zero x norm) || (Zero x denorm)
+#
+	global		fmul_zero		# global for fsglmul
+fmul_zero:
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fmul_zero_p		# result ZERO is pos.
+fmul_zero_n:
+	fmov.s		&0x80000000,%fp0	# load -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
+	rts
+fmul_zero_p:
+	fmov.s		&0x00000000,%fp0	# load +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# Multiply: (inf x inf) || (inf x norm) || (inf x denorm)
+#
+# Note: The j-bit for an infinity is a don't-care. However, to be
+# strictly compatible w/ the 68881/882, we make sure to return an
+# INF w/ the j-bit set if the input INF j-bit was set. Destination
+# INFs take priority.
+#
+	global		fmul_inf_dst		# global for fsglmul
+fmul_inf_dst:
+	fmovm.x		DST(%a1),&0x80		# return INF result in fp0
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fmul_inf_dst_p		# result INF is pos.
+fmul_inf_dst_n:
+	fabs.x		%fp0			# clear result sign
+	fneg.x		%fp0			# set result sign
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
+	rts
+fmul_inf_dst_p:
+	fabs.x		%fp0			# clear result sign
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set INF
+	rts
+
+	global		fmul_inf_src		# global for fsglmul
+fmul_inf_src:
+	fmovm.x		SRC(%a0),&0x80		# return INF result in fp0
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fmul_inf_dst_p		# result INF is pos.
+	bra.b		fmul_inf_dst_n
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fin(): emulates the fmove instruction				#
+#	fsin(): emulates the fsmove instruction				#
+#	fdin(): emulates the fdmove instruction				#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize mantissa for EXOP on denorm			#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan_1op() - return QNAN result				#
+#	res_snan_1op() - return SNAN result				#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round prec/mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Norms can be emulated w/ a regular fmove instruction. For	#
+# sgl/dbl, must scale exponent and perform an "fmove". Check to see	#
+# if the result would have overflowed/underflowed. If so, use unf_res()	#
+# or ovf_res() to return the default result. Also return EXOP if	#
+# exception is enabled. If no exception, return the default result.	#
+#	Unnorms don't pass through here.				#
+#									#
+#########################################################################
+
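+# note: the rnd prec/mode byte in d0 mirrors the 68881 FPCR MODE byte
+# layout: bits 7-6 hold the precision (00 = ext, 01 = sgl, 10 = dbl)
+# and bits 5-4 hold the rounding mode (00 = RN, 01 = RZ, 10 = RM,
+# 11 = RP), which is why fsin/fdin below can mask with 0x30 and OR the
+# desired precision into the upper two bits.
+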
+	global		fsin
+fsin:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fin
+
+	global		fdin
+fdin:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl precision
+
+	global		fin
+fin:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	mov.b		STAG(%a6),%d1		# fetch src optype tag
+	bne.w		fin_not_norm		# optimize on non-norm input
+
+#
+# FP MOVE IN: NORMs and DENORMs ONLY!
+#
+fin_norm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.w		fin_not_ext		# no, so go handle dbl or sgl
+
+#
+# precision selected is extended. so...we cannot get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+	tst.b		SRC_EX(%a0)		# is the operand negative?
+	bpl.b		fin_norm_done		# no
+	bset		&neg_bit,FPSR_CC(%a6)	# yes, so set 'N' ccode bit
+fin_norm_done:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance(no inexactness!)
+#
+fin_denorm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.w		fin_not_ext		# no, so go handle dbl or sgl
+
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+	tst.b		SRC_EX(%a0)		# is the operand negative?
+	bpl.b		fin_denorm_done		# no
+	bset		&neg_bit,FPSR_CC(%a6)	# yes, so set 'N' ccode bit
+fin_denorm_done:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	btst		&unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+	bne.b		fin_denorm_unfl_ena	# yes
+	rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
+fin_denorm_unfl_ena:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	bsr.l		norm			# normalize result
+	neg.w		%d0			# new exponent = -(shft val)
+	addi.w		&0x6000,%d0		# add new bias to exponent
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch old sign,exp
+	andi.w		&0x8000,%d1		# keep old sign
+	andi.w		&0x7fff,%d0		# clear sign position
+	or.w		%d1,%d0			# concat new exp,old sign
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#
+# operand is to be rounded to single or double precision
+#
+fin_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.b		fin_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fin_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f80	# will move in underflow?
+	bge.w		fin_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407e	# will move in overflow?
+	beq.w		fin_sd_may_ovfl		# maybe; go check
+	blt.w		fin_sd_ovfl		# yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved into the fp reg file
+#
+fin_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# perform move
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fin_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exponent
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fin_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c00	# will move in underflow?
+	bge.w		fin_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43fe	# will move in overflow?
+	beq.w		fin_sd_may_ovfl		# maybe; go check
+	blt.w		fin_sd_ovfl		# yes; go handle overflow
+	bra.w		fin_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fin_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	tst.b		FP_SCR0_EX(%a6)		# is operand negative?
+	bpl.b		fin_sd_unfl_tst
+	bset		&neg_bit,FPSR_CC(%a6)	# set 'N' ccode bit
+
+# if underflow or inexact is enabled, then go calculate the EXOP first.
+fin_sd_unfl_tst:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fin_sd_unfl_ena		# yes
+
+fin_sd_unfl_dis:
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# unf_res may have set 'Z'
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow or inexact is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
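+# (the +0x6000 is the standard EXOP bias for underflow: it slides the
+# too-small exponent back into representable range so the exception
+# handler can recover the true exponent by subtracting 0x6000.)
+#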
+fin_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# subtract scale factor
+	andi.w		&0x8000,%d2		# extract old sign
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR1_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fin_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fin_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# perform move
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fin_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fin_sd_ovfl_ena		# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fin_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fin_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	sub.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fin_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fin_sd_may_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# perform the move
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fin_sd_ovfl_tst		# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fin_sd_normal_exit
+
+##########################################################################
+
+#
+# operand is not a NORM: check its optype and branch accordingly
+#
+fin_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fin_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNANs
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNANs
+	beq.l		res_qnan_1op
+
+#
+# do the fmove in; at this point, only possible ops are ZERO and INF.
+# use fmov to determine ccodes.
+# prec:mode should be zero at this point but it won't affect the answer anyway.
+#
+	fmov.x		SRC(%a0),%fp0		# do fmove in
+	fmov.l		%fpsr,%d0		# no exceptions possible
+	rol.l		&0x8,%d0		# put ccodes in lo byte
+	mov.b		%d0,FPSR_CC(%a6)	# insert correct ccodes
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fdiv(): emulates the fdiv instruction				#
+#	fsdiv(): emulates the fsdiv instruction				#
+#	fddiv(): emulates the fddiv instruction				#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res() - return default underflow result			#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a divide	#
+# instruction won't cause an exception. Use the regular fdiv to		#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
+
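+# For the divide, the exit scale factor is the difference of the two
+# scale factors rather than their sum; as a C sketch (names invented):
+#
+#	long div_sf(long e_src, long e_dst)
+#	{	/* = e_src - e_dst */
+#		return (0x3fff - e_dst) - (0x3fff - e_src);
+#	}
+#
+# on exit, subtracting this from the scaled quotient's exponent
+# (~0x3fff) yields the true biased exponent, e_dst - e_src + 0x3fff.
+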
+	align		0x10
+tbl_fdiv_unfl:
+	long		0x3fff - 0x0000		# ext_unfl
+	long		0x3fff - 0x3f81		# sgl_unfl
+	long		0x3fff - 0x3c01		# dbl_unfl
+
+tbl_fdiv_ovfl:
+	long		0x3fff - 0x7ffe		# ext overflow exponent
+	long		0x3fff - 0x407e		# sgl overflow exponent
+	long		0x3fff - 0x43fe		# dbl overflow exponent
+
+	global		fsdiv
+fsdiv:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fdiv
+
+	global		fddiv
+fddiv:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fdiv
+fdiv:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fdiv_not_norm		# optimize on non-norm input
+
+#
+# DIVIDE: NORMs and DENORMs ONLY!
+#
+fdiv_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# scale src exponent
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# scale dst exponent
+
+	neg.l		(%sp)			# SCALE FACTOR = scale2 - scale1
+	add.l		%d0,(%sp)
+
+	mov.w		2+L_SCR3(%a6),%d1	# fetch precision
+	lsr.b		&0x6,%d1		# shift to lo bits
+	mov.l		(%sp)+,%d0		# load S.F.
+	cmp.l		%d0,(tbl_fdiv_ovfl.b,%pc,%d1.w*4) # will result overflow?
+	ble.w		fdiv_may_ovfl		# result may overflow
+
+	cmp.l		%d0,(tbl_fdiv_unfl.w,%pc,%d1.w*4) # will result underflow?
+	beq.w		fdiv_may_unfl		# maybe
+	bgt.w		fdiv_unfl		# yes; go handle underflow
+
+fdiv_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# perform divide
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fdiv_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store result on stack
+	mov.l		%d2,-(%sp)		# store d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+tbl_fdiv_ovfl2:
+	long		0x7fff
+	long		0x407f
+	long		0x43ff
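+# (these are the smallest biased exponents that overflow each precision;
+# the largest finite exponents are 0x7ffe, 0x407e, and 0x43fe for ext,
+# sgl, and dbl respectively)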
+
+fdiv_no_ovfl:
+	mov.l		(%sp)+,%d0		# restore scale factor
+	bra.b		fdiv_normal_exit
+
+fdiv_may_ovfl:
+	mov.l		%d0,-(%sp)		# save scale factor
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d0
+	fmov.l		&0x0,%fpcr
+
+	or.l		%d0,USER_FPSR(%a6)	# save INEX,N
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+	mov.w		(%sp),%d0		# fetch new exponent
+	add.l		&0xc,%sp		# clear result from stack
+	andi.l		&0x7fff,%d0		# strip sign
+	sub.l		(%sp),%d0		# add scale factor
+	cmp.l		%d0,(tbl_fdiv_ovfl2.b,%pc,%d1.w*4)
+	blt.b		fdiv_no_ovfl
+	mov.l		(%sp)+,%d0
+
+fdiv_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fdiv_ovfl_ena		# yes
+
+fdiv_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+fdiv_ovfl_ena:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fdiv_ovfl_ena_sd	# no, do sgl or dbl
+
+fdiv_ovfl_ena_cont:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1		# clear sign bit
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fdiv_ovfl_dis
+
+fdiv_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# keep rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	bra.b		fdiv_ovfl_ena_cont
+
+fdiv_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fdiv_unfl_ena		# yes
+
+fdiv_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fdiv_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fdiv_unfl_ena_sd	# no, sgl or dbl
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fdiv_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp1	# execute divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exp
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fdiv_unfl_dis
+
+fdiv_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# use only rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fdiv_unfl_ena_cont
+
+#
+# the divide operation MAY underflow:
+#
+fdiv_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x1		# is |result| > 1.b?
+	fbgt.w		fdiv_normal_exit	# no; no underflow occurred
+	fblt.w		fdiv_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 1. but,
+# we don't know if the result was an underflow that rounded up to a 1
+# or a normalized number that rounded down to a 1. so, redo the entire
+# operation using RZ as the rounding mode to see what the pre-rounded
+# result is. this case should be relatively rare.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert RZ
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp1	# execute divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x1		# is |result| < 1.b?
+	fbge.w		fdiv_normal_exit	# no; no underflow occurred
+	bra.w		fdiv_unfl		# yes; underflow occurred
+
+############################################################################
+
+#
+# Divide: inputs are not both normalized; what are they?
+#
+fdiv_not_norm:
+	mov.w		(tbl_fdiv_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fdiv_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fdiv_op:
+	short		fdiv_norm	- tbl_fdiv_op # NORM / NORM
+	short		fdiv_inf_load	- tbl_fdiv_op # NORM / ZERO
+	short		fdiv_zero_load	- tbl_fdiv_op # NORM / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # NORM / QNAN
+	short		fdiv_norm	- tbl_fdiv_op # NORM / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # NORM / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_zero_load	- tbl_fdiv_op # ZERO / NORM
+	short		fdiv_res_operr	- tbl_fdiv_op # ZERO / ZERO
+	short		fdiv_zero_load	- tbl_fdiv_op # ZERO / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # ZERO / QNAN
+	short		fdiv_zero_load	- tbl_fdiv_op # ZERO / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # ZERO / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_inf_dst	- tbl_fdiv_op # INF / NORM
+	short		fdiv_inf_dst	- tbl_fdiv_op # INF / ZERO
+	short		fdiv_res_operr	- tbl_fdiv_op # INF / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # INF / QNAN
+	short		fdiv_inf_dst	- tbl_fdiv_op # INF / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # INF / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / NORM
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / ZERO
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / QNAN
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # QNAN / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_norm	- tbl_fdiv_op # DENORM / NORM
+	short		fdiv_inf_load	- tbl_fdiv_op # DENORM / ZERO
+	short		fdiv_zero_load	- tbl_fdiv_op # DENORM / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # DENORM / QNAN
+	short		fdiv_norm	- tbl_fdiv_op # DENORM / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # DENORM / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / NORM
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / ZERO
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / INF
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / QNAN
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+fdiv_res_qnan:
+	bra.l		res_qnan
+fdiv_res_snan:
+	bra.l		res_snan
+fdiv_res_operr:
+	bra.l		res_operr
+
+	global		fdiv_zero_load		# global for fsgldiv
+fdiv_zero_load:
+	mov.b		SRC_EX(%a0),%d0		# result sign is exclusive
+	mov.b		DST_EX(%a1),%d1		# or of input signs.
+	eor.b		%d0,%d1
+	bpl.b		fdiv_zero_load_p	# result is positive
+	fmov.s		&0x80000000,%fp0	# load a -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set Z/N
+	rts
+fdiv_zero_load_p:
+	fmov.s		&0x00000000,%fp0	# load a +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# The destination was In Range and the source was a ZERO. The result,
+# therefore, is an INF w/ the proper sign.
+# So, determine the sign and return a new INF (w/ the j-bit cleared).
+#
+	global		fdiv_inf_load		# global for fsgldiv
+fdiv_inf_load:
+	ori.w		&dz_mask+adz_mask,2+USER_FPSR(%a6) # set DZ/ADZ
+	mov.b		SRC_EX(%a0),%d0		# load both signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fdiv_inf_load_p		# result is positive
+	fmov.s		&0xff800000,%fp0	# make result -INF
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
+	rts
+fdiv_inf_load_p:
+	fmov.s		&0x7f800000,%fp0	# make result +INF
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set INF
+	rts
+
+#
+# The destination was an INF w/ an In Range or ZERO source, the result is
+# an INF w/ the proper sign.
+# The 68881/882 returns the destination INF w/ the new sign(if the j-bit of the
+# dst INF is set, then the j-bit of the result INF is also set).
+#
+	global		fdiv_inf_dst		# global for fsgldiv
+fdiv_inf_dst:
+	mov.b		DST_EX(%a1),%d0		# load both signs
+	mov.b		SRC_EX(%a0),%d1
+	eor.b		%d0,%d1
+	bpl.b		fdiv_inf_dst_p		# result is positive
+
+	fmovm.x		DST(%a1),&0x80		# return result in fp0
+	fabs.x		%fp0			# clear sign bit
+	fneg.x		%fp0			# set sign bit
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fdiv_inf_dst_p:
+	fmovm.x		DST(%a1),&0x80		# return result in fp0
+	fabs.x		%fp0			# return positive INF
+	mov.b		&inf_bmask,FPSR_CC(%a6) # set INF
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fneg(): emulates the fneg instruction				#
+#	fsneg(): emulates the fsneg instruction				#
+#	fdneg(): emulates the fdneg instruction				#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize a denorm to provide EXOP			#
+#	scale_to_zero_src() - scale sgl/dbl source exponent		#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan_1op() - return QNAN result				#
+#	res_snan_1op() - return SNAN result				#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, zeroes, and infinities as special cases. Separate	#
+# norms/denorms into ext/sgl/dbl precisions. Extended precision can be	#
+# emulated by simply setting sign bit. Sgl/dbl operands must be scaled	#
+# and an actual fneg performed to see if overflow/underflow would have	#
+# occurred. If so, return default underflow/overflow result. Else,	#
+# scale the result exponent and return result. FPSR gets set based on	#
+# the result value.							#
+#									#
+#########################################################################
+
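+# In the extended-precision case the negate reduces to a sign-bit flip;
+# as a one-line C sketch (invented name):
+#
+#	void xneg(unsigned short *sgn_exp) { *sgn_exp ^= 0x8000; }
+#
+# which is exactly what the eori.w in fneg_norm below performs.
+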
+	global		fsneg
+fsneg:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fneg
+
+	global		fdneg
+fdneg:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fneg
+fneg:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+	mov.b		STAG(%a6),%d1
+	bne.w		fneg_not_norm		# optimize on non-norm input
+
+#
+# NEGATE SIGN : norms and denorms ONLY!
+#
+fneg_norm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.w		fneg_not_ext		# no; go handle sgl or dbl
+
+#
+# precision selected is extended. so...we can not get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	eori.w		&0x8000,%d0		# negate sign
+	bpl.b		fneg_norm_load		# sign is positive
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+fneg_norm_load:
+	mov.w		%d0,FP_SCR0_EX(%a6)
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance(no inexactness!)
+#
+fneg_denorm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fneg_not_ext		# no; go handle sgl or dbl
+
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	eori.w		&0x8000,%d0		# negate sign
+	bpl.b		fneg_denorm_done	# sign is positive
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# yes, set 'N' ccode bit
+fneg_denorm_done:
+	mov.w		%d0,FP_SCR0_EX(%a6)
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+
+	btst		&unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+	bne.b		fneg_ext_unfl_ena	# yes
+	rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
+fneg_ext_unfl_ena:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	bsr.l		norm			# normalize result
+	neg.w		%d0			# new exponent = -(shft val)
+	addi.w		&0x6000,%d0		# add new bias to exponent
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch old sign,exp
+	andi.w		&0x8000,%d1		# keep old sign
+	andi.w		&0x7fff,%d0		# clear sign position
+	or.w		%d1,%d0			# concat old sign, new exponent
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#
+# operand is either single or double
+#
+fneg_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.b		fneg_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fneg_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f80	# will move in underflow?
+	bge.w		fneg_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407e	# will move in overflow?
+	beq.w		fneg_sd_may_ovfl	# maybe; go check
+	blt.w		fneg_sd_ovfl		# yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fneg_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fneg.x		FP_SCR0(%a6),%fp0	# perform negation
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fneg_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load sgn,exp
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fneg_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c00	# will move in underflow?
+	bge.b		fneg_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43fe	# will move in overflow?
+	beq.w		fneg_sd_may_ovfl	# maybe; go check
+	blt.w		fneg_sd_ovfl		# yes; go handle overflow
+	bra.w		fneg_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fneg_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	eori.b		&0x80,FP_SCR0_EX(%a6)	# negate sign
+	bpl.b		fneg_sd_unfl_tst
+	bset		&neg_bit,FPSR_CC(%a6)	# set 'N' ccode bit
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+fneg_sd_unfl_tst:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fneg_sd_unfl_ena	# yes
+
+fneg_sd_unfl_dis:
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# unf_res may have set 'Z'
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow or inexact is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fneg_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR1_EX(%a6)	# insert new exp
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fneg_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fneg_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fneg.x		FP_SCR0(%a6),%fp0	# perform negation
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fneg_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fneg_sd_ovfl_ena	# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fneg_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fneg_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fneg_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fneg_sd_may_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fneg.x		FP_SCR0(%a6),%fp0	# perform negation
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fneg_sd_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fneg_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fneg_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fneg_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNAN
+	beq.l		res_qnan_1op
+
+#
+# do the fneg; at this point, only possible ops are ZERO and INF.
+# use fneg to determine ccodes.
+# prec:mode should be zero at this point but it won't affect the answer anyway.
+#
+	fneg.x		SRC_EX(%a0),%fp0	# do fneg
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0		# put ccodes in lo byte
+	mov.b		%d0,FPSR_CC(%a6)	# insert correct ccodes
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	ftst(): emulates the ftst instruction				#
+#									#
+# XREF ****************************************************************	#
+#	res{s,q}nan_1op() - set NAN result for monadic instruction	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	none								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Check the source operand tag (STAG) and set the FPSR according	#
+# to the operand type and sign.						#
+#									#
+#########################################################################
+
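+# A compact C sketch of the ccode outcomes (names invented; the masks
+# correspond to the FPSR condition-code N/Z/I bits; NANs never reach
+# this point, as they are routed to the res_*_1op handlers):
+#
+#	enum optype { NORM, ZERO, INF, DENORM };
+#	unsigned char ftst_cc(enum optype t, int neg)
+#	{
+#		unsigned char cc = neg ? 0x08 : 0x00;	/* 'N' bit */
+#		if (t == ZERO) cc |= 0x04;		/* 'Z' bit */
+#		if (t == INF)  cc |= 0x02;		/* 'I' bit */
+#		return cc;	/* NORM/DENORM set 'N' only */
+#	}
+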
+	global		ftst
+ftst:
+	mov.b		STAG(%a6),%d1
+	bne.b		ftst_not_norm		# optimize on non-norm input
+
+#
+# Norm:
+#
+ftst_norm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_norm_m		# yes
+	rts
+ftst_norm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#
+# input is not normalized; what is it?
+#
+ftst_not_norm:
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		ftst_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		ftst_inf
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNAN
+	beq.l		res_qnan_1op
+
+#
+# Denorm:
+#
+ftst_denorm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_denorm_m		# yes
+	rts
+ftst_denorm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#
+# Infinity:
+#
+ftst_inf:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_inf_m		# yes
+ftst_inf_p:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+ftst_inf_m:
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'I','N' ccode bits
+	rts
+
+#
+# Zero:
+#
+ftst_zero:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_zero_m		# yes
+ftst_zero_p:
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+ftst_zero_m:
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set 'Z','N' ccode bits
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fint(): emulates the fint instruction				#
+#									#
+# XREF ****************************************************************	#
+#	res_{s,q}nan_1op() - set NAN result for monadic operation	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round precision/mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Separate according to operand type. Unnorms don't pass through	#
+# here. For norms, load the rounding mode/prec, execute a "fint", then	#
+# store the resulting FPSR bits.					#
+#	For denorms, force the j-bit to a one and do the same as for	#
+# norms. Denorms are so low that the answer will either be a zero or a	#
+# one.									#
+#	For zeroes/infs/NANs, return the same while setting the FPSR	#
+# as appropriate.							#
+#									#
+#########################################################################
+
+	global		fint
+fint:
+	mov.b		STAG(%a6),%d1
+	bne.b		fint_not_norm		# optimize on non-norm input
+
+#
+# Norm:
+#
+fint_norm:
+	andi.b		&0x30,%d0		# set prec = ext
+
+	fmov.l		%d0,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fint.x		SRC(%a0),%fp0		# execute fint
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d0		# save FPSR
+	or.l		%d0,USER_FPSR(%a6)	# set exception bits
+
+	rts
+
+#
+# input is not normalized; what is it?
+#
+fint_not_norm:
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		fint_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fint_inf
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.b		fint_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	bra.l		res_qnan_1op		# weed out QNAN
+
+#
+# Denorm:
+#
+# for DENORMs, the result will be either (+/-)ZERO or (+/-)1.
+# also, the INEX2 and AINEX exception bits will be set.
+# so, we could either set these manually or force the DENORM
+# to a very small NORM and ship it to the NORM routine.
+# I do the latter.
+#
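+# (the DENORM's exponent field is zero; forcing the mantissa hi byte to
+# 0x80 yields a tiny NORM at the very bottom of the exponent range,
+# whose fint result and INEX bits match those of the original DENORM.)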
+fint_denorm:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
+	mov.b		&0x80,FP_SCR0_HI(%a6)	# force DENORM ==> small NORM
+	lea		FP_SCR0(%a6),%a0
+	bra.b		fint_norm
+
+#
+# Zero:
+#
+fint_zero:
+	tst.b		SRC_EX(%a0)		# is ZERO negative?
+	bmi.b		fint_zero_m		# yes
+fint_zero_p:
+	fmov.s		&0x00000000,%fp0	# return +ZERO in fp0
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fint_zero_m:
+	fmov.s		&0x80000000,%fp0	# return -ZERO in fp0
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+	rts
+
+#
+# Infinity:
+#
+fint_inf:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	tst.b		SRC_EX(%a0)		# is INF negative?
+	bmi.b		fint_inf_m		# yes
+fint_inf_p:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+fint_inf_m:
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fintrz(): emulates the fintrz instruction			#
+#									#
+# XREF ****************************************************************	#
+#	res_{s,q}nan_1op() - set NAN result for monadic operation	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round precision/mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Separate according to operand type. Unnorms don't pass through	#
+# here. For norms, load the rounding mode/prec, execute a "fintrz",	#
+# then store the resulting FPSR bits.					#
+#	For denorms, force the j-bit to a one and do the same as for	#
+# norms. Denorms are so low that the answer, once chopped, will	#
+# always be a zero.							#
+#	For zeroes/infs/NANs, return the same while setting the FPSR	#
+# as appropriate.							#
+#									#
+#########################################################################
+
+	global		fintrz
+fintrz:
+	mov.b		STAG(%a6),%d1
+	bne.b		fintrz_not_norm		# optimize on non-norm input
+
+#
+# Norm:
+#
+fintrz_norm:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fintrz.x	SRC(%a0),%fp0		# execute fintrz
+
+	fmov.l		%fpsr,%d0		# save FPSR
+	or.l		%d0,USER_FPSR(%a6)	# set exception bits
+
+	rts
+
+#
+# input is not normalized; what is it?
+#
+fintrz_not_norm:
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		fintrz_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fintrz_inf
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.b		fintrz_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	bra.l		res_qnan_1op		# weed out QNAN
+
+#
+# Denorm:
+#
+# for DENORMs, the result will be (+/-)ZERO.
+# also, the INEX2 and AINEX exception bits will be set.
+# so, we could either set these manually or force the DENORM
+# to a very small NORM and ship it to the NORM routine.
+# I do the latter.
+#
+fintrz_denorm:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
+	mov.b		&0x80,FP_SCR0_HI(%a6)	# force DENORM ==> small NORM
+	lea		FP_SCR0(%a6),%a0
+	bra.b		fintrz_norm
+
+#
+# Zero:
+#
+fintrz_zero:
+	tst.b		SRC_EX(%a0)		# is ZERO negative?
+	bmi.b		fintrz_zero_m		# yes
+fintrz_zero_p:
+	fmov.s		&0x00000000,%fp0	# return +ZERO in fp0
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fintrz_zero_m:
+	fmov.s		&0x80000000,%fp0	# return -ZERO in fp0
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+	rts
+
+#
+# Infinity:
+#
+fintrz_inf:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	tst.b		SRC_EX(%a0)		# is INF negative?
+	bmi.b		fintrz_inf_m		# yes
+fintrz_inf_p:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+fintrz_inf_m:
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fabs():  emulates the fabs instruction				#
+#	fsabs(): emulates the fsabs instruction				#
+#	fdabs(): emulates the fdabs instruction				#
+#									#
+# XREF **************************************************************** #
+#	norm() - normalize denorm mantissa to provide EXOP		#
+#	scale_to_zero_src() - make exponent = 0; get scale factor	#
+#	unf_res() - calculate underflow result				#
+#	ovf_res() - calculate overflow result				#
+#	res_{s,q}nan_1op() - set NAN result for monadic operation	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision source operand		#
+#	d0 = rnd precision/mode						#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Simply clear sign for extended precision norm. Ext prec denorm	#
+# gets an EXOP created for it since it's an underflow.			#
+#	Double and single precision can overflow and underflow. First,	#
+# scale the operand such that the exponent is zero. Perform an "fabs"	#
+# using the correct rnd mode/prec. Check to see if the original		#
+# exponent would take an exception. If so, use unf_res() or ovf_res()	#
+# to calculate the default result. Also, create the EXOP for the	#
+# exceptional case. If no exception should occur, insert the correct	#
+# result exponent and return.						#
+#	Unnorms don't pass through here.				#
+#									#
+#########################################################################
+
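+# a sketch of the sgl-precision window used below (assuming, as the
+# checks imply, that scale_to_zero_src() returns d0 = 0x3fff - old exp):
+#	smallest sgl norm = 2^-126         -> extended exp 0x3f81
+#	largest sgl norm  = 1.1..1 x 2^127 -> extended exp 0x407e
+# so d0 >= 0x3fff-0x3f80 means old exp <= 0x3f80: certain underflow;
+# d0 < 0x3fff-0x407e means old exp > 0x407e: certain overflow; and
+# d0 == 0x3fff-0x407e means old exp == 0x407e: rounding may still push
+# the result over the top, hence the "may ovfl" path.
+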
+	global		fsabs
+fsabs:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fabs
+
+	global		fdabs
+fdabs:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl precision
+
+	global		fabs
+fabs:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+	mov.b		STAG(%a6),%d1
+	bne.w		fabs_not_norm		# optimize on non-norm input
+
+#
+# ABSOLUTE VALUE: norms and denorms ONLY!
+#
+fabs_norm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fabs_not_ext		# no; go handle sgl or dbl
+
+#
+# precision selected is extended. so...we can not get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d1
+	bclr		&15,%d1			# force absolute value
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert exponent
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance (no inexactness!)
+#
+fabs_denorm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fabs_not_ext		# no
+
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	bclr		&15,%d0			# clear sign
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert exponent
+
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+
+	btst		&unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+	bne.b		fabs_ext_unfl_ena
+	rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
+fabs_ext_unfl_ena:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	bsr.l		norm			# normalize result
+	neg.w		%d0			# new exponent = -(shft val)
+	addi.w		&0x6000,%d0		# add new bias to exponent
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch old sign,exp
+	andi.w		&0x8000,%d1		# keep old sign
+	andi.w		&0x7fff,%d0		# clear sign position
+	or.w		%d1,%d0			# concat old sign, new exponent
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
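+# a worked example of the bias above: if norm() had to shift the
+# mantissa up by 3 bits it returns d0 = 3, so the EXOP exponent becomes
+# -3 + 0x6000 = 0x5ffd (plus the old sign bit). this is the same 0x6000
+# underflow bias used by the other EXOP paths in this package, and it
+# lets a trap handler recover the true, too-small exponent later.
+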
+#
+# operand is either single or double
+#
+fabs_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.b		fabs_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fabs_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f80	# will move in underflow?
+	bge.w		fabs_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407e	# will move in overflow?
+	beq.w		fabs_sd_may_ovfl	# maybe; go check
+	blt.w		fabs_sd_ovfl		# yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fabs_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fabs_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load sgn,exp
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fabs_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c00	# will move in underflow?
+	bge.b		fabs_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43fe	# will move in overflow?
+	beq.w		fabs_sd_may_ovfl	# maybe; go check
+	blt.w		fabs_sd_ovfl		# yes; go handle overflow
+	bra.w		fabs_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fabs_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	bclr		&0x7,FP_SCR0_EX(%a6)	# force absolute value
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fabs_sd_unfl_ena	# yes
+
+fabs_sd_unfl_dis:
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set possible 'Z' ccode
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fabs_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat new sign,new exp
+	mov.w		%d1,FP_SCR1_EX(%a6)	# insert new exp
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fabs_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fabs_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fabs_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fabs_sd_ovfl_ena	# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fabs_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fabs_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fabs_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fabs_sd_may_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fabs_sd_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fabs_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fabs_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fabs_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNAN
+	beq.l		res_qnan_1op
+
+	fabs.x		SRC(%a0),%fp0		# force absolute value
+
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fabs_inf
+fabs_zero:
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fabs_inf:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fcmp(): fp compare op routine					#
+#									#
+# XREF ****************************************************************	#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = round prec/mode						#
+#									#
+# OUTPUT ************************************************************** #
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs and denorms as special cases. For everything else,	#
+# just use the actual fcmp instruction to produce the correct condition	#
+# codes.								#
+#									#
+#########################################################################
+
+	global		fcmp
+fcmp:
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1
+	bne.b		fcmp_not_norm		# optimize on non-norm input
+
+#
+# COMPARE FP OPs : NORMs, ZEROs, INFs, and "corrected" DENORMs
+#
+fcmp_norm:
+	fmovm.x		DST(%a1),&0x80		# load dst op
+
+	fcmp.x		%fp0,SRC(%a0)		# do compare
+
+	fmov.l		%fpsr,%d0		# save FPSR
+	rol.l		&0x8,%d0		# extract ccode bits
+	mov.b		%d0,FPSR_CC(%a6)	# set ccode bits (no exc bits are set)
+
+	rts
+
+#
+# fcmp: inputs are not both normalized; what are they?
+#
+fcmp_not_norm:
+	mov.w		(tbl_fcmp_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fcmp_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fcmp_op:
+	short		fcmp_norm	- tbl_fcmp_op # NORM - NORM
+	short		fcmp_norm	- tbl_fcmp_op # NORM - ZERO
+	short		fcmp_norm	- tbl_fcmp_op # NORM - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # NORM - QNAN
+	short		fcmp_nrm_dnrm	- tbl_fcmp_op # NORM - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # NORM - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_norm	- tbl_fcmp_op # ZERO - NORM
+	short		fcmp_norm	- tbl_fcmp_op # ZERO - ZERO
+	short		fcmp_norm	- tbl_fcmp_op # ZERO - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # ZERO - QNAN
+	short		fcmp_dnrm_s	- tbl_fcmp_op # ZERO - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # ZERO - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_norm	- tbl_fcmp_op # INF - NORM
+	short		fcmp_norm	- tbl_fcmp_op # INF - ZERO
+	short		fcmp_norm	- tbl_fcmp_op # INF - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # INF - QNAN
+	short		fcmp_dnrm_s	- tbl_fcmp_op # INF - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # INF - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - NORM
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - ZERO
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - QNAN
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # QNAN - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_dnrm_nrm	- tbl_fcmp_op # DENORM - NORM
+	short		fcmp_dnrm_d	- tbl_fcmp_op # DENORM - ZERO
+	short		fcmp_dnrm_d	- tbl_fcmp_op # DENORM - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # DENORM - QNAN
+	short		fcmp_dnrm_sd	- tbl_fcmp_op # DENORM - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # DENORM - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - NORM
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - ZERO
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - INF
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - QNAN
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
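+# a sketch of the dispatch just above: d1 = (DTAG << 3) + STAG selects
+# one of the 48 word-sized entries; "%d1.w*2" converts the index to a
+# byte offset, each entry holds the handler's distance from
+# tbl_fcmp_op, and the jmp adds that distance back onto the table base.
+# assuming the tag values implied by the row order (NORM=0, ZERO=1,
+# INF=2, QNAN=3, DENORM=4, SNAN=5), a ZERO dst with a DENORM src gives
+# d1 = 8+4 = 12, landing on fcmp_dnrm_s.
+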
+# unlike all other functions for QNAN and SNAN, fcmp does NOT set the
+# 'N' bit for a negative QNAN or SNAN input so we must squelch it here.
+fcmp_res_qnan:
+	bsr.l		res_qnan
+	andi.b		&0xf7,FPSR_CC(%a6)
+	rts
+fcmp_res_snan:
+	bsr.l		res_snan
+	andi.b		&0xf7,FPSR_CC(%a6)
+	rts
+
+#
+# DENORMs are a little more difficult.
+# If you have 2 DENORMs, then you can just force the j-bit to a one
+# and use the fcmp_norm routine.
+# If you have a DENORM and an INF or ZERO, just force the DENORM's j-bit to a one
+# and use the fcmp_norm routine.
+# If you have a DENORM and a NORM with opposite signs, then use fcmp_norm, also.
+# But with a DENORM and a NORM of the same sign, the neg bit is set if the
+# (1) signs are (+) and the DENORM is the dst or
+# (2) signs are (-) and the DENORM is the src
+#
+
+fcmp_dnrm_s:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),%d0
+	bset		&31,%d0			# DENORM src; make into small norm
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a0
+	bra.w		fcmp_norm
+
+fcmp_dnrm_d:
+	mov.l		DST_EX(%a1),FP_SCR0_EX(%a6)
+	mov.l		DST_HI(%a1),%d0
+	bset		&31,%d0			# DENORM dst; make into small norm
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a1
+	bra.w		fcmp_norm
+
+fcmp_dnrm_sd:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		DST_HI(%a1),%d0
+	bset		&31,%d0			# DENORM dst; make into small norm
+	mov.l		%d0,FP_SCR1_HI(%a6)
+	mov.l		SRC_HI(%a0),%d0
+	bset		&31,%d0			# DENORM src; make into small norm
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR1(%a6),%a1
+	lea		FP_SCR0(%a6),%a0
+	bra.w		fcmp_norm
+
+fcmp_nrm_dnrm:
+	mov.b		SRC_EX(%a0),%d0		# determine if like signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bmi.w		fcmp_dnrm_s
+
+# signs are the same, so must determine the answer ourselves.
+	tst.b		%d0			# is src op negative?
+	bmi.b		fcmp_nrm_dnrm_m		# yes
+	rts
+fcmp_nrm_dnrm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+fcmp_dnrm_nrm:
+	mov.b		SRC_EX(%a0),%d0		# determine if like signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bmi.w		fcmp_dnrm_d
+
+# signs are the same, so must determine the answer ourselves.
+	tst.b		%d0			# is src op negative?
+	bpl.b		fcmp_dnrm_nrm_m		# no
+	rts
+fcmp_dnrm_nrm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
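+# a worked instance of rule (2) above: dst = -1.0 (a NORM) and src = a
+# negative DENORM. the denorm is closer to zero, so dst < src and the
+# compare must report 'N' -- which is exactly what fcmp_nrm_dnrm_m does
+# for same-sign negative inputs.
+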
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsglmul(): emulates the fsglmul instruction			#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res4() - return default underflow result for sglop		#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a multiply	#
+# instruction won't cause an exception. Use the regular fsglmul to	#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
+
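+# a sketch of the scaling math: the product of 2^e1 and 2^e2 has
+# exponent ~(e1 + e2), so once both operands are scaled to exponent
+# 0x3fff the multiply below can't trap. the true exponent is rebuilt
+# on exit from SCALE_FACTOR = scale1 + scale2 (each scale being 0x3fff
+# minus the corresponding original exponent).
+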
+	global		fsglmul
+fsglmul:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1
+
+	bne.w		fsglmul_not_norm	# optimize on non-norm input
+
+fsglmul_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# scale exponent
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# scale dst exponent
+
+	add.l		(%sp)+,%d0		# SCALE_FACTOR = scale1 + scale2
+
+	cmpi.l		%d0,&0x3fff-0x7ffe	# would result ovfl?
+	beq.w		fsglmul_may_ovfl	# result may rnd to overflow
+	blt.w		fsglmul_ovfl		# result will overflow
+
+	cmpi.l		%d0,&0x3fff+0x0001	# would result unfl?
+	beq.w		fsglmul_may_unfl	# result may rnd to no unfl
+	bgt.w		fsglmul_unfl		# result will underflow
+
+fsglmul_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsglmul_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+fsglmul_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsglmul_ovfl_tst:
+
+# save setting this until now because this is where fsglmul_may_ovfl may jump in
+	or.l		&ovfl_inx_mask, USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsglmul_ovfl_ena	# yes
+
+fsglmul_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	andi.b		&0x30,%d0		# force prec = ext
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+fsglmul_ovfl_ena:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fsglmul_ovfl_dis
+
+fsglmul_may_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fsglmul_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fsglmul_normal_exit
+
+fsglmul_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsglmul_unfl_ena	# yes
+
+fsglmul_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res4		# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' bit may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fsglmul_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp1	# execute sgl multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fsglmul_unfl_dis
+
+fsglmul_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| > 2.b?
+	fbgt.w		fsglmul_normal_exit	# no; no underflow occurred
+	fblt.w		fsglmul_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 2. but,
+# we don't know if the result was an underflow that rounded up to a 2 or
+# a normalized number that rounded down to a 2. so, redo the entire operation
+# using RZ as the rounding mode to see what the pre-rounded result is.
+# this case should be relatively rare.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert RZ
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp1	# execute sgl multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x2		# is |result| < 2.b?
+	fbge.w		fsglmul_normal_exit	# no; no underflow occurred
+	bra.w		fsglmul_unfl		# yes, underflow occurred
+
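+# an example of why the RZ retry settles it: under RN a pre-rounded
+# value of 1.111...1 x 2^0 (just under 2) can round up to 2.0, hiding a
+# true underflow. RZ chops instead, so a pre-rounded value below 2
+# stays below 2 and the final fcmp above gives the honest answer.
+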
+##############################################################################
+
+#
+# Single Precision Multiply: inputs are not both normalized; what are they?
+#
+fsglmul_not_norm:
+	mov.w		(tbl_fsglmul_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fsglmul_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fsglmul_op:
+	short		fsglmul_norm		- tbl_fsglmul_op # NORM x NORM
+	short		fsglmul_zero		- tbl_fsglmul_op # NORM x ZERO
+	short		fsglmul_inf_src		- tbl_fsglmul_op # NORM x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # NORM x QNAN
+	short		fsglmul_norm		- tbl_fsglmul_op # NORM x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # NORM x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_zero		- tbl_fsglmul_op # ZERO x NORM
+	short		fsglmul_zero		- tbl_fsglmul_op # ZERO x ZERO
+	short		fsglmul_res_operr	- tbl_fsglmul_op # ZERO x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # ZERO x QNAN
+	short		fsglmul_zero		- tbl_fsglmul_op # ZERO x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # ZERO x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_inf_dst		- tbl_fsglmul_op # INF x NORM
+	short		fsglmul_res_operr	- tbl_fsglmul_op # INF x ZERO
+	short		fsglmul_inf_dst		- tbl_fsglmul_op # INF x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # INF x QNAN
+	short		fsglmul_inf_dst		- tbl_fsglmul_op # INF x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # INF x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x NORM
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x ZERO
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x QNAN
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # QNAN x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_norm		- tbl_fsglmul_op # DENORM x NORM
+	short		fsglmul_zero		- tbl_fsglmul_op # DENORM x ZERO
+	short		fsglmul_inf_src		- tbl_fsglmul_op # DENORM x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # DENORM x QNAN
+	short		fsglmul_norm		- tbl_fsglmul_op # DENORM x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # DENORM x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x NORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x ZERO
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x INF
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x QNAN
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+fsglmul_res_operr:
+	bra.l		res_operr
+fsglmul_res_snan:
+	bra.l		res_snan
+fsglmul_res_qnan:
+	bra.l		res_qnan
+fsglmul_zero:
+	bra.l		fmul_zero
+fsglmul_inf_src:
+	bra.l		fmul_inf_src
+fsglmul_inf_dst:
+	bra.l		fmul_inf_dst
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsgldiv(): emulates the fsgldiv instruction			#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res4() - return default underflow result for sglop		#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a divide	#
+# instruction won't cause an exception. Use the regular fsgldiv to	#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
+
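+# a sketch of the scaling math for divide: 2^e1 / 2^e2 has exponent
+# ~(e1 - e2), so with both operands scaled to 0x3fff the fsgldiv below
+# can't trap; the saved factor (scale2 - scale1, the dst/src scale
+# difference) rebuilds the true exponent on exit.
+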
+	global		fsgldiv
+fsgldiv:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fsgldiv_not_norm	# optimize on non-norm input
+
+#
+# DIVIDE: NORMs and DENORMs ONLY!
+#
+fsgldiv_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# calculate scale factor 1
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# calculate scale factor 2
+
+	neg.l		(%sp)			# S.F. = scale2 - scale1
+	add.l		%d0,(%sp)
+
+	mov.w		2+L_SCR3(%a6),%d1	# fetch precision,mode
+	lsr.b		&0x6,%d1
+	mov.l		(%sp)+,%d0
+	cmpi.l		%d0,&0x3fff-0x7ffe
+	ble.w		fsgldiv_may_ovfl
+
+	cmpi.l		%d0,&0x3fff-0x0000	# will result underflow?
+	beq.w		fsgldiv_may_unfl	# maybe
+	bgt.w		fsgldiv_unfl		# yes; go handle underflow
+
+fsgldiv_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# perform sgl divide
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsgldiv_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store result on stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+fsgldiv_may_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d1
+	fmov.l		&0x0,%fpcr
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX,N
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+	mov.w		(%sp),%d1		# fetch new exponent
+	add.l		&0xc,%sp		# clear result
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	cmp.l		%d1,&0x7fff		# did divide overflow?
+	blt.b		fsgldiv_normal_exit
+
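+# note the different strategy here: the divide has already been done at
+# the scaled exponents, so overflow is detected after the fact by
+# rebuilding the would-be exponent (scaled exponent minus the scale
+# factor) and checking it against the extended-precision max of 0x7fff.
+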
+fsgldiv_ovfl_tst:
+	or.w		&ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsgldiv_ovfl_ena	# yes
+
+fsgldiv_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	andi.b		&0x30,%d0		# force prec = ext
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+fsgldiv_ovfl_ena:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract new bias
+	andi.w		&0x7fff,%d1		# clear ms bit
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fsgldiv_ovfl_dis
+
+fsgldiv_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# execute sgl divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsgldiv_unfl_ena	# yes
+
+fsgldiv_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res4		# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' bit may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fsgldiv_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp1	# execute sgl divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1		# clear top bit
+	or.w		%d2,%d1			# concat old sign, new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fsgldiv_unfl_dis
+
+#
+# the divide operation MAY underflow:
+#
+fsgldiv_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# execute sgl divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x1		# is |result| > 1.b?
+	fbgt.w		fsgldiv_normal_exit	# no; no underflow occurred
+	fblt.w		fsgldiv_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 1. but,
+# we don't know if the result was an underflow that rounded up to a 1
+# or a normalized number that rounded down to a 1. so, redo the entire
+# operation using RZ as the rounding mode to see what the pre-rounded
+# result is. this case should be relatively rare.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into %fp1
+
+	clr.l		%d1			# clear scratch register
+	ori.b		&rz_mode*0x10,%d1	# force RZ rnd mode
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp1	# execute sgl divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x1		# is |result| < 1.b?
+	fbge.w		fsgldiv_normal_exit	# no; no underflow occurred
+	bra.w		fsgldiv_unfl		# yes; underflow occurred
+
+############################################################################
+
+#
+# Divide: inputs are not both normalized; what are they?
+#
+fsgldiv_not_norm:
+	mov.w		(tbl_fsgldiv_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fsgldiv_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fsgldiv_op:
+	short		fsgldiv_norm		- tbl_fsgldiv_op # NORM / NORM
+	short		fsgldiv_inf_load	- tbl_fsgldiv_op # NORM / ZERO
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # NORM / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # NORM / QNAN
+	short		fsgldiv_norm		- tbl_fsgldiv_op # NORM / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # NORM / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # ZERO / NORM
+	short		fsgldiv_res_operr	- tbl_fsgldiv_op # ZERO / ZERO
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # ZERO / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # ZERO / QNAN
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # ZERO / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # ZERO / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_inf_dst		- tbl_fsgldiv_op # INF / NORM
+	short		fsgldiv_inf_dst		- tbl_fsgldiv_op # INF / ZERO
+	short		fsgldiv_res_operr	- tbl_fsgldiv_op # INF / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # INF / QNAN
+	short		fsgldiv_inf_dst		- tbl_fsgldiv_op # INF / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # INF / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / NORM
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / ZERO
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / QNAN
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # QNAN / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_norm		- tbl_fsgldiv_op # DENORM / NORM
+	short		fsgldiv_inf_load	- tbl_fsgldiv_op # DENORM / ZERO
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # DENORM / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # DENORM / QNAN
+	short		fsgldiv_norm		- tbl_fsgldiv_op # DENORM / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # DENORM / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / NORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / ZERO
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / INF
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / QNAN
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+fsgldiv_res_qnan:
+	bra.l		res_qnan
+fsgldiv_res_snan:
+	bra.l		res_snan
+fsgldiv_res_operr:
+	bra.l		res_operr
+fsgldiv_inf_load:
+	bra.l		fdiv_inf_load
+fsgldiv_zero_load:
+	bra.l		fdiv_zero_load
+fsgldiv_inf_dst:
+	bra.l		fdiv_inf_dst
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fadd(): emulates the fadd instruction				#
+#	fsadd(): emulates the fsadd instruction				#
+#	fdadd(): emulates the fdadd instruction				#
+#									#
+# XREF ****************************************************************	#
+#	addsub_scaler2() - scale the operands so they won't take exc	#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan() - set QNAN result					#
+#	res_snan() - set SNAN result					#
+#	res_operr() - set OPERR result					#
+#	scale_to_zero_src() - set src operand exponent equal to zero	#
+#	scale_to_zero_dst() - set dst operand exponent equal to zero	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Do addition after scaling exponents such that exception won't	#
+# occur. Then, check result exponent to see if exception would have	#
+# occurred. If so, return default result and maybe EXOP. Else, insert	#
+# the correct result exponent and return. Set FPSR bits as appropriate.	#
+#									#
+#########################################################################
+
+	global		fsadd
+fsadd:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fadd
+
+	global		fdadd
+fdadd:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fadd
+fadd:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fadd_not_norm		# optimize on non-norm input
+
+#
+# ADD: norms and denorms
+#
+fadd_norm:
+	bsr.l		addsub_scaler2		# scale exponents
+
+fadd_zero_entry:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fadd.x		FP_SCR0(%a6),%fp0	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch INEX2,N,Z
+
+	or.l		%d1,USER_FPSR(%a6)	# save exc and ccode bits
+
+	fbeq.w		fadd_zero_exit		# if result is zero, end now
+
+	mov.l		%d2,-(%sp)		# save d2
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+
+	mov.w		2+L_SCR3(%a6),%d1
+	lsr.b		&0x6,%d1
+
+	mov.w		(%sp),%d2		# fetch new sign, exp
+	andi.l		&0x7fff,%d2		# strip sign
+	sub.l		%d0,%d2			# add scale factor
+
+	cmp.l		%d2,(tbl_fadd_ovfl.b,%pc,%d1.w*4) # is it an overflow?
+	bge.b		fadd_ovfl		# yes
+
+	cmp.l		%d2,(tbl_fadd_unfl.b,%pc,%d1.w*4) # is it an underflow?
+	blt.w		fadd_unfl		# yes
+	beq.w		fadd_may_unfl		# maybe; go find out
+
+fadd_normal:
+	mov.w		(%sp),%d1
+	andi.w		&0x8000,%d1		# keep sign
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x80		# return result in fp0
+
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fadd_zero_exit:
+#	fmov.s		&0x00000000,%fp0	# return zero in fp0
+	rts
+
+tbl_fadd_ovfl:
+	long		0x7fff			# ext ovfl
+	long		0x407f			# sgl ovfl
+	long		0x43ff			# dbl ovfl
+
+tbl_fadd_unfl:
+	long	        0x0000			# ext unfl
+	long		0x3f81			# sgl unfl
+	long		0x3c01			# dbl unfl
+
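+# reading the tables: d1 above holds the rnd precision (0 = ext,
+# 1 = sgl, 2 = dbl), so "%d1.w*4" picks one long per precision. for
+# sgl, a post-scale exponent >= 0x407f overflows while one below 0x3f81
+# (the exponent of 2^-126, the smallest sgl norm) underflows; exactly
+# 0x3f81 is the borderline handled by fadd_may_unfl.
+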
+fadd_ovfl:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fadd_ovfl_ena		# yes
+
+	add.l		&0xc,%sp
+fadd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fadd_ovfl_ena:
+	mov.b		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fadd_ovfl_ena_sd	# no; prec = sgl or dbl
+
+fadd_ovfl_ena_cont:
+	mov.w		(%sp),%d1
+	andi.w		&0x8000,%d1		# keep sign
+	subi.l		&0x6000,%d2		# add extra bias
+	andi.w		&0x7fff,%d2
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x40		# return EXOP in fp1
+	bra.b		fadd_ovfl_dis
+
+fadd_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# keep rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fadd.x		FP_SCR0(%a6),%fp0	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	add.l		&0xc,%sp
+	fmovm.x		&0x01,-(%sp)
+	bra.b		fadd_ovfl_ena_cont
+
+fadd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	add.l		&0xc,%sp
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fadd.x		FP_SCR0(%a6),%fp0	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save status
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fadd_unfl_ena		# yes
+
+fadd_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' bit may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fadd_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fadd_unfl_ena_sd	# no; sgl or dbl
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fadd_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fadd.x		FP_SCR0(%a6),%fp1	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1		# clear top bit
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fadd_unfl_dis
+
+fadd_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# use only rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fadd_unfl_ena_cont
+
+#
+# result is equal to the smallest normalized number in the selected precision
+# if the precision is extended, this result could not have come from an
+# underflow that rounded up.
+#
+fadd_may_unfl:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1
+	beq.w		fadd_normal		# yes; no underflow occurred
+
+	mov.l		0x4(%sp),%d1		# extract hi(man)
+	cmpi.l		%d1,&0x80000000		# is hi(man) = 0x80000000?
+	bne.w		fadd_normal		# no; no underflow occurred
+
+	tst.l		0x8(%sp)		# is lo(man) = 0x0?
+	bne.w		fadd_normal		# no; no underflow occurred
+
+	btst		&inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.w		fadd_normal		# no; no underflow occurred
+
+#
+# ok, so now the result has an exponent equal to the smallest normalized
+# exponent for the selected precision. also, the mantissa is equal to
+# 0x8000000000000000 and this mantissa is the result of rounding non-zero
+# g,r,s.
+# now, we must determine whether the pre-rounded result was an underflow
+# rounded "up" or a normalized number rounded "down".
+# so, we do this by re-executing the add using RZ as the rounding mode and
+# seeing if the new result is smaller or equal to the current result.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fadd.x		FP_SCR0(%a6),%fp1	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# compare absolute values
+	fabs.x		%fp1
+	fcmp.x		%fp0,%fp1		# is first result > second?
+
+	fbgt.w		fadd_unfl		# yes; it's an underflow
+	bra.w		fadd_normal		# no; it's not an underflow
+
+##########################################################################
+
+#
+# Add: inputs are not both normalized; what are they?
+#
+fadd_not_norm:
+	mov.w		(tbl_fadd_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fadd_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fadd_op:
+	short		fadd_norm	- tbl_fadd_op # NORM + NORM
+	short		fadd_zero_src	- tbl_fadd_op # NORM + ZERO
+	short		fadd_inf_src	- tbl_fadd_op # NORM + INF
+	short		fadd_res_qnan	- tbl_fadd_op # NORM + QNAN
+	short		fadd_norm	- tbl_fadd_op # NORM + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # NORM + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_zero_dst	- tbl_fadd_op # ZERO + NORM
+	short		fadd_zero_2	- tbl_fadd_op # ZERO + ZERO
+	short		fadd_inf_src	- tbl_fadd_op # ZERO + INF
+	short		fadd_res_qnan	- tbl_fadd_op # ZERO + QNAN
+	short		fadd_zero_dst	- tbl_fadd_op # ZERO + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # ZERO + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_inf_dst	- tbl_fadd_op # INF + NORM
+	short		fadd_inf_dst	- tbl_fadd_op # INF + ZERO
+	short		fadd_inf_2	- tbl_fadd_op # INF + INF
+	short		fadd_res_qnan	- tbl_fadd_op # INF + QNAN
+	short		fadd_inf_dst	- tbl_fadd_op # INF + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # INF + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + NORM
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + ZERO
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + INF
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + QNAN
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # QNAN + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_norm	- tbl_fadd_op # DENORM + NORM
+	short		fadd_zero_src	- tbl_fadd_op # DENORM + ZERO
+	short		fadd_inf_src	- tbl_fadd_op # DENORM + INF
+	short		fadd_res_qnan	- tbl_fadd_op # DENORM + QNAN
+	short		fadd_norm	- tbl_fadd_op # DENORM + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # DENORM + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + NORM
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + ZERO
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + INF
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + QNAN
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+fadd_res_qnan:
+	bra.l		res_qnan
+fadd_res_snan:
+	bra.l		res_snan
+
+#
+# both operands are ZEROes
+#
+fadd_zero_2:
+	mov.b		SRC_EX(%a0),%d0		# are the signs opposite?
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bmi.w		fadd_zero_2_chk_rm	# weed out (-ZERO)+(+ZERO)
+
+# the signs are the same. so determine whether they are positive or negative
+# and return the appropriately signed zero.
+	tst.b		%d0			# are ZEROes positive or negative?
+	bmi.b		fadd_zero_rm		# negative
+	fmov.s		&0x00000000,%fp0	# return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# the ZEROes have opposite signs:
+# - therefore, we return +ZERO if the rounding modes are RN,RZ, or RP.
+# - -ZERO is returned in the case of RM.
+#
+fadd_zero_2_chk_rm:
+	mov.b		3+L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# extract rnd mode
+	cmpi.b		%d1,&rm_mode*0x10	# is rnd mode == RM?
+	beq.b		fadd_zero_rm		# yes
+	fmov.s		&0x00000000,%fp0	# return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+fadd_zero_rm:
+	fmov.s		&0x80000000,%fp0	# return -ZERO
+	mov.b		&neg_bmask+z_bmask,FPSR_CC(%a6) # set NEG/Z
+	rts
+
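+# for example: (+0.0) + (-0.0) is +0.0 under RN, RZ, and RP, but -0.0
+# under RM -- the IEEE 754 rule the RM check above implements.
+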
+#
+# one operand is a ZERO and the other is a DENORM or NORM. scale
+# the DENORM or NORM and jump to the regular fadd routine.
+#
+fadd_zero_dst:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# scale the operand
+	clr.w		FP_SCR1_EX(%a6)
+	clr.l		FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+	bra.w		fadd_zero_entry		# go execute fadd
+
+fadd_zero_src:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	bsr.l		scale_to_zero_dst	# scale the operand
+	clr.w		FP_SCR0_EX(%a6)
+	clr.l		FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+	bra.w		fadd_zero_entry		# go execute fadd
+
+#
+# both operands are INFs. an OPERR will result if the INFs have
+# different signs. else, an INF of the same sign is returned
+#
+fadd_inf_2:
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d1,%d0
+	bmi.l		res_operr		# weed out (-INF)+(+INF)
+
+# ok, so it's not an OPERR. but, we do have to remember to return the
+# src INF since that's where the 881/882 gets the j-bit from...
+
+#
+# operands are INF and one of {ZERO, INF, DENORM, NORM}
+#
+fadd_inf_src:
+	fmovm.x		SRC(%a0),&0x80		# return src INF
+	tst.b		SRC_EX(%a0)		# is INF positive?
+	bpl.b		fadd_inf_done		# yes; we're done
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+#
+# operands are INF and one of {ZERO, INF, DENORM, NORM}
+#
+fadd_inf_dst:
+	fmovm.x		DST(%a1),&0x80		# return dst INF
+	tst.b		DST_EX(%a1)		# is INF positive?
+	bpl.b		fadd_inf_done		# yes; we're done
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fadd_inf_done:
+	mov.b		&inf_bmask,FPSR_CC(%a6) # set INF
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsub(): emulates the fsub instruction				#
+#	fssub(): emulates the fssub instruction				#
+#	fdsub(): emulates the fdsub instruction				#
+#									#
+# XREF ****************************************************************	#
+#	addsub_scaler2() - scale the operands so they won't take exc	#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan() - set QNAN result					#
+#	res_snan() - set SNAN result					#
+#	res_operr() - set OPERR result					#
+#	scale_to_zero_src() - set src operand exponent equal to zero	#
+#	scale_to_zero_dst() - set dst operand exponent equal to zero	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Do subtraction after scaling exponents such that exception won't#
+# occur. Then, check result exponent to see if exception would have	#
+# occurred. If so, return default result and maybe EXOP. Else, insert	#
+# the correct result exponent and return. Set FPSR bits as appropriate.	#
+#									#
+#########################################################################
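+#
+# the scale-then-check flow described above can be sketched in C roughly
+# as follows (illustrative names only; the ovfl/unfl thresholds are the
+# per-precision values in tbl_fsub_ovfl/tbl_fsub_unfl below):
+#
+#	/* classify the true exponent of a result computed on operands
+#	 * that were pre-scaled toward the 0x3fff bias point */
+#	int classify(int res_exp, int scale, int ovfl_exp, int unfl_exp)
+#	{
+#		int exp = res_exp - scale;	/* undo the scaling */
+#		if (exp >= ovfl_exp)
+#			return  2;		/* overflow */
+#		if (exp <  unfl_exp)
+#			return -2;		/* underflow */
+#		if (exp == unfl_exp)
+#			return -1;		/* maybe underflow; check more */
+#		return 0;			/* in range; reinsert exp */
+#	}
+#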
+
+	global		fssub
+fssub:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fsub
+
+	global		fdsub
+fdsub:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fsub
+fsub:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1		# fetch dst optype tag
+	lsl.b		&0x3,%d1		# shift dst tag into bits 3-5
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fsub_not_norm		# optimize on non-norm input
+
+#
+# SUB: norms and denorms
+#
+fsub_norm:
+	bsr.l		addsub_scaler2		# scale exponents
+
+fsub_zero_entry:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsub.x		FP_SCR0(%a6),%fp0	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch INEX2, N, Z
+
+	or.l		%d1,USER_FPSR(%a6)	# save exc and ccode bits
+
+	fbeq.w		fsub_zero_exit		# if result zero, end now
+
+	mov.l		%d2,-(%sp)		# save d2
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+
+	mov.w		2+L_SCR3(%a6),%d1	# fetch rnd prec,mode
+	lsr.b		&0x6,%d1		# keep rnd prec (0=ext,1=sgl,2=dbl)
+
+	mov.w		(%sp),%d2		# fetch new exponent
+	andi.l		&0x7fff,%d2		# strip sign
+	sub.l		%d0,%d2			# subtract scale factor
+
+	cmp.l		%d2,(tbl_fsub_ovfl.b,%pc,%d1.w*4) # is it an overflow?
+	bge.b		fsub_ovfl		# yes
+
+	cmp.l		%d2,(tbl_fsub_unfl.b,%pc,%d1.w*4) # is it an underflow?
+	blt.w		fsub_unfl		# yes
+	beq.w		fsub_may_unfl		# maybe; go find out
+
+fsub_normal:
+	mov.w		(%sp),%d1
+	andi.w		&0x8000,%d1		# keep sign
+	or.w		%d2,%d1			# insert new exponent
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x80		# return result in fp0
+
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fsub_zero_exit:
+#	fmov.s		&0x00000000,%fp0	# return zero in fp0
+	rts
+
+tbl_fsub_ovfl:
+	long		0x7fff			# ext ovfl
+	long		0x407f			# sgl ovfl
+	long		0x43ff			# dbl ovfl
+
+tbl_fsub_unfl:
+	long		0x0000			# ext unfl
+	long		0x3f81			# sgl unfl
+	long		0x3c01			# dbl unfl
+
+fsub_ovfl:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsub_ovfl_ena		# yes
+
+	add.l		&0xc,%sp
+fsub_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fsub_ovfl_ena:
+	mov.b		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fsub_ovfl_ena_sd	# no
+
+fsub_ovfl_ena_cont:
+	mov.w		(%sp),%d1		# fetch {sgn,exp}
+	andi.w		&0x8000,%d1		# keep sign
+	subi.l		&0x6000,%d2		# subtract new bias
+	andi.w		&0x7fff,%d2		# clear top bit
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x40		# return EXOP in fp1
+	bra.b		fsub_ovfl_dis
+
+fsub_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# clear rnd prec
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fsub.x		FP_SCR0(%a6),%fp0	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	add.l		&0xc,%sp
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+	bra.b		fsub_ovfl_ena_cont
+
+fsub_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	add.l		&0xc,%sp
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsub.x		FP_SCR0(%a6),%fp0	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save status
+
+	or.l		%d1,USER_FPSR(%a6)
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsub_unfl_ena		# yes
+
+fsub_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fsub_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fsub_unfl_ena_sd	# no
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fsub_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsub.x		FP_SCR0(%a6),%fp1	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# store result to stack
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1		# clear top bit
+	or.w		%d2,%d1			# concat sgn,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fsub_unfl_dis
+
+fsub_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# clear rnd prec
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fsub_unfl_ena_cont
+
+#
+# the result is equal to the smallest normalized number in the selected
+# precision. if the precision is extended, this result could not have
+# come from an underflow that rounded up.
+#
+fsub_may_unfl:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# fetch rnd prec
+	beq.w		fsub_normal		# yes; no underflow occurred
+
+	mov.l		0x4(%sp),%d1
+	cmpi.l		%d1,&0x80000000		# is hi(man) = 0x80000000?
+	bne.w		fsub_normal		# no; no underflow occurred
+
+	tst.l		0x8(%sp)		# is lo(man) = 0x0?
+	bne.w		fsub_normal		# no; no underflow occurred
+
+	btst		&inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.w		fsub_normal		# no; no underflow occurred
+
+#
+# ok, so now the result has an exponent equal to the smallest normalized
+# exponent for the selected precision. also, the mantissa is equal to
+# 0x8000000000000000 and this mantissa is the result of rounding non-zero
+# g,r,s.
+# now, we must determine whether the pre-rounded result was an underflow
+# rounded "up" or a normalized number rounded "down".
+# so, we do this by re-executing the subtract using RZ as the rounding
+# mode and seeing if the new result is smaller than or equal to the
+# current result.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsub.x		FP_SCR0(%a6),%fp1	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# compare absolute values
+	fabs.x		%fp1
+	fcmp.x		%fp0,%fp1		# is first result > second?
+
+	fbgt.w		fsub_unfl		# yes; it's an underflow
+	bra.w		fsub_normal		# no; it's not an underflow
+
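+# the RZ re-execution trick above, as a hedged C sketch (fenv-based and
+# purely illustrative of the idea, not of this package's mechanics):
+#
+#	#include <fenv.h>
+#	#include <math.h>
+#
+#	/* did a result sitting on the underflow boundary get there by
+#	 * rounding up? redo the op chopped and compare magnitudes. */
+#	int rounded_up_from_underflow(double a, double b, double res)
+#	{
+#		int old = fegetround();
+#		fesetround(FE_TOWARDZERO);	/* RZ: round toward zero */
+#		double chopped = a - b;		/* re-execute the subtract */
+#		fesetround(old);
+#		return fabs(chopped) < fabs(res); /* smaller => rounded up */
+#	}
+#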
+##########################################################################
+
+#
+# Sub: inputs are not both normalized; what are they?
+#
+fsub_not_norm:
+	mov.w		(tbl_fsub_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fsub_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fsub_op:
+	short		fsub_norm	- tbl_fsub_op # NORM - NORM
+	short		fsub_zero_src	- tbl_fsub_op # NORM - ZERO
+	short		fsub_inf_src	- tbl_fsub_op # NORM - INF
+	short		fsub_res_qnan	- tbl_fsub_op # NORM - QNAN
+	short		fsub_norm	- tbl_fsub_op # NORM - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # NORM - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_zero_dst	- tbl_fsub_op # ZERO - NORM
+	short		fsub_zero_2	- tbl_fsub_op # ZERO - ZERO
+	short		fsub_inf_src	- tbl_fsub_op # ZERO - INF
+	short		fsub_res_qnan	- tbl_fsub_op # ZERO - QNAN
+	short		fsub_zero_dst	- tbl_fsub_op # ZERO - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # ZERO - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_inf_dst	- tbl_fsub_op # INF - NORM
+	short		fsub_inf_dst	- tbl_fsub_op # INF - ZERO
+	short		fsub_inf_2	- tbl_fsub_op # INF - INF
+	short		fsub_res_qnan	- tbl_fsub_op # INF - QNAN
+	short		fsub_inf_dst	- tbl_fsub_op # INF - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # INF - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - NORM
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - ZERO
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - INF
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - QNAN
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # QNAN - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_norm	- tbl_fsub_op # DENORM - NORM
+	short		fsub_zero_src	- tbl_fsub_op # DENORM - ZERO
+	short		fsub_inf_src	- tbl_fsub_op # DENORM - INF
+	short		fsub_res_qnan	- tbl_fsub_op # DENORM - QNAN
+	short		fsub_norm	- tbl_fsub_op # DENORM - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # DENORM - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - NORM
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - ZERO
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - INF
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - QNAN
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+fsub_res_qnan:
+	bra.l		res_qnan
+fsub_res_snan:
+	bra.l		res_snan
+
+#
+# both operands are ZEROes
+#
+fsub_zero_2:
+	mov.b		SRC_EX(%a0),%d0
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d1,%d0
+	bpl.b		fsub_zero_2_chk_rm
+
+# the signs are opposite, so, return a ZERO w/ the sign of the dst ZERO
+	tst.b		%d0			# is dst negative?
+	bmi.b		fsub_zero_2_rm		# yes
+	fmov.s		&0x00000000,%fp0	# no; return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# the ZEROes have the same signs:
+# - therefore, we return +ZERO if the rounding mode is RN, RZ, or RP
+# - -ZERO is returned in the case of RM.
+#
+fsub_zero_2_chk_rm:
+	mov.b		3+L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# extract rnd mode
+	cmpi.b		%d1,&rm_mode*0x10	# is rnd mode = RM?
+	beq.b		fsub_zero_2_rm		# yes
+	fmov.s		&0x00000000,%fp0	# no; return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+fsub_zero_2_rm:
+	fmov.s		&0x80000000,%fp0	# return -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set Z/NEG
+	rts
+
+#
+# one operand is a ZERO and the other is a DENORM or a NORM.
+# scale the DENORM or NORM and jump to the regular fsub routine.
+#
+fsub_zero_dst:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# scale the operand
+	clr.w		FP_SCR1_EX(%a6)
+	clr.l		FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+	bra.w		fsub_zero_entry		# go execute fsub
+
+fsub_zero_src:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	bsr.l		scale_to_zero_dst	# scale the operand
+	clr.w		FP_SCR0_EX(%a6)
+	clr.l		FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+	bra.w		fsub_zero_entry		# go execute fsub
+
+#
+# both operands are INFs. an OPERR will result if the INFs have the
+# same signs. else, an INF w/ the sign of the dst INF is returned.
+#
+fsub_inf_2:
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d1,%d0
+	bpl.l		res_operr		# weed out (+INF)-(+INF) and (-INF)-(-INF)
+
+# ok, so it's not an OPERR. but we do have to remember to return
+# the src INF since that's where the 881/882 gets the j-bit.
+
+fsub_inf_src:
+	fmovm.x		SRC(%a0),&0x80		# return src INF
+	fneg.x		%fp0			# invert sign
+	fbge.w		fsub_inf_done		# sign is now positive
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fsub_inf_dst:
+	fmovm.x		DST(%a1),&0x80		# return dst INF
+	tst.b		DST_EX(%a1)		# is INF negative?
+	bpl.b		fsub_inf_done		# no
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fsub_inf_done:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set INF
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsqrt(): emulates the fsqrt instruction				#
+#	fssqrt(): emulates the fssqrt instruction			#
+#	fdsqrt(): emulates the fdsqrt instruction			#
+#									#
+# XREF ****************************************************************	#
+#	scale_sqrt() - scale the source operand				#
+#	unf_res() - return default underflow result			#
+#	ovf_res() - return default overflow result			#
+#	res_qnan_1op() - return QNAN result				#
+#	res_snan_1op() - return SNAN result				#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a sqrt		#
+# instruction won't cause an exception. Use the regular fsqrt to	#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
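+#
+# the scaling below leans on the identity sqrt(m * 2^e) ==
+# sqrt(m * 2^(e mod 2)) * 2^(e div 2); a loose C sketch (names are
+# illustrative, not symbols of this package):
+#
+#	#include <math.h>
+#
+#	double sqrt_scaled(double m, int e)	/* value = m * 2^e */
+#	{
+#		int half = (e - (e & 1)) / 2;		/* floor(e/2) */
+#		double r = sqrt(ldexp(m, e & 1));	/* safe: exp is 0 or 1 */
+#		return ldexp(r, half);			/* scale result back */
+#	}
+#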
+
+	global		fssqrt
+fssqrt:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fsqrt
+
+	global		fdsqrt
+fdsqrt:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl precision
+
+	global		fsqrt
+fsqrt:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+	clr.w		%d1
+	mov.b		STAG(%a6),%d1
+	bne.w		fsqrt_not_norm		# optimize on non-norm input
+
+#
+# SQUARE ROOT: norms and denorms ONLY!
+#
+fsqrt_norm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.l		res_operr		# yes
+
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fsqrt_not_ext		# no; go handle sgl or dbl
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsqrt.x		(%a0),%fp0		# execute square root
+
+	fmov.l		%fpsr,%d1
+	or.l		%d1,USER_FPSR(%a6)	# set N,INEX
+
+	rts
+
+fsqrt_denorm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.l		res_operr		# yes
+
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fsqrt_not_ext		# no; go handle sgl or dbl
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_sqrt		# calculate scale factor
+
+	bra.w		fsqrt_sd_normal
+
+#
+# operand is either single or double
+#
+fsqrt_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.w		fsqrt_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fsqrt_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_sqrt		# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f81	# will move in underflow?
+	beq.w		fsqrt_sd_may_unfl
+	bgt.w		fsqrt_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407f	# will move in overflow?
+	beq.w		fsqrt_sd_may_ovfl	# maybe; go check
+	blt.w		fsqrt_sd_ovfl		# yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved into the fp reg file
+#
+fsqrt_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# perform square root
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsqrt_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load sgn,exp
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# subtract scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fsqrt_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_sqrt		# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c01	# will move in underflow?
+	beq.w		fsqrt_sd_may_unfl
+	bgt.b		fsqrt_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43ff	# will move in overflow?
+	beq.w		fsqrt_sd_may_ovfl	# maybe; go check
+	blt.w		fsqrt_sd_ovfl		# yes; go handle overflow
+	bra.w		fsqrt_sd_normal		# no; go handle normalized op
+
+# we're on the line here and the distinguishing characteristic is whether
+# the exponent is 0x3fff or 0x3ffe. if it's 0x3fff, then it's a safe number;
+# otherwise fall through to underflow.
+fsqrt_sd_may_unfl:
+	btst		&0x0,1+FP_SCR0_EX(%a6)	# is exponent 0x3fff?
+	bne.w		fsqrt_sd_normal		# yes, so no underflow
+
+#
+# operand WILL underflow when moved into the fp register file
+#
+fsqrt_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# execute square root
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsqrt_sd_unfl_ena	# yes
+
+fsqrt_sd_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set possible 'Z' ccode
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fsqrt_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat new sign,new exp
+	mov.w		%d1,FP_SCR1_EX(%a6)	# insert new exp
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fsqrt_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fsqrt_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# perform square root
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsqrt_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsqrt_sd_ovfl_ena	# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fsqrt_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended (and don't alter the FPSR).
+#
+fsqrt_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fsqrt_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fsqrt_sd_may_ovfl:
+	btst		&0x0,1+FP_SCR0_EX(%a6)	# is exponent 0x3fff?
+	bne.w		fsqrt_sd_ovfl		# yes, so overflow
+
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# perform square root
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fmov.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x1		# is |result| >= 1.b?
+	fbge.w		fsqrt_sd_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fsqrt_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fsqrt_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fsqrt_denorm
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		fsqrt_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fsqrt_inf
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	bra.l		res_qnan_1op
+
+#
+#	fsqrt(+0) = +0
+#	fsqrt(-0) = -0
+#	fsqrt(+INF) = +INF
+#	fsqrt(-INF) = OPERR
+#
+fsqrt_zero:
+	tst.b		SRC_EX(%a0)		# is ZERO positive or negative?
+	bmi.b		fsqrt_zero_m		# negative
+fsqrt_zero_p:
+	fmov.s		&0x00000000,%fp0	# return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fsqrt_zero_m:
+	fmov.s		&0x80000000,%fp0	# return -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set 'Z','N' ccode bits
+	rts
+
+fsqrt_inf:
+	tst.b		SRC_EX(%a0)		# is INF positive or negative?
+	bmi.l		res_operr		# negative
+fsqrt_inf_p:
+	fmovm.x		SRC(%a0),&0x80		# return +INF in fp0
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+
+##########################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	addsub_scaler2(): scale inputs to fadd/fsub such that no	#
+#			  OVFL/UNFL exceptions will result		#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize mantissa after adjusting exponent		#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SRC(a6) = fp op1(src)					#
+#	FP_DST(a6) = fp op2(dst)					#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SRC(a6) = fp op1 scaled(src)					#
+#	FP_DST(a6) = fp op2 scaled(dst)					#
+#	d0         = scale amount					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If the DST exponent is > the SRC exponent, set the DST exponent	#
+# equal to 0x3fff and scale the SRC exponent by the value that the	#
+# DST exponent was scaled by. If the SRC exponent is greater or equal,	#
+# do the opposite. Return this scale factor in d0.			#
+#	If the two exponents differ by > the number of mantissa bits	#
+# plus two, then set the smallest exponent to a very small value as a	#
+# quick shortcut.							#
+#									#
+#########################################################################
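+#
+# a loose C sketch of the above (illustrative names; 64 stands for the
+# extended precision mantissa length):
+#
+#	/* scale the larger exponent to the 0x3fff bias point and drag
+#	 * the smaller one along; if the operands are too far apart to
+#	 * interact, just pin the smaller exponent to 1 */
+#	int scaler2(int *big_exp, int *small_exp)
+#	{
+#		int scale = 0x3fff - *big_exp;	/* amount of the shift */
+#		if (*big_exp - (64 + 2) >= *small_exp)
+#			*small_exp = 1;		/* quick shortcut */
+#		else
+#			*small_exp += scale;	/* keep relative distance */
+#		*big_exp = 0x3fff;
+#		return scale;
+#	}
+#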
+
+	global		addsub_scaler2
+addsub_scaler2:
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	mov.w		DST_EX(%a1),%d1
+	mov.w		%d0,FP_SCR0_EX(%a6)
+	mov.w		%d1,FP_SCR1_EX(%a6)
+
+	andi.w		&0x7fff,%d0
+	andi.w		&0x7fff,%d1
+	mov.w		%d0,L_SCR1(%a6)		# store src exponent
+	mov.w		%d1,2+L_SCR1(%a6)	# store dst exponent
+
+	cmp.w		%d0, %d1		# is src exp >= dst exp?
+	bge.l		src_exp_ge2
+
+# dst exp is >  src exp; scale dst to exp = 0x3fff
+dst_exp_gt2:
+	bsr.l		scale_to_zero_dst
+	mov.l		%d0,-(%sp)		# save scale factor
+
+	cmpi.b		STAG(%a6),&DENORM	# is src denormalized?
+	bne.b		cmpexp12
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the denorm; result is new exp
+	neg.w		%d0			# new exp = -(shft val)
+	mov.w		%d0,L_SCR1(%a6)		# insert new exp
+
+cmpexp12:
+	mov.w		2+L_SCR1(%a6),%d0
+	subi.w		&mantissalen+2,%d0	# subtract mantissalen+2 from larger exp
+
+	cmp.w		%d0,L_SCR1(%a6)		# is difference >= len(mantissa)+2?
+	bge.b		quick_scale12
+
+	mov.w		L_SCR1(%a6),%d0
+	add.w		0x2(%sp),%d0		# scale src exponent by scale factor
+	mov.w		FP_SCR0_EX(%a6),%d1
+	and.w		&0x8000,%d1
+	or.w		%d1,%d0			# concat {sgn,new exp}
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new src exponent
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+quick_scale12:
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# zero src exponent
+	bset		&0x0,1+FP_SCR0_EX(%a6)	# set exp = 1
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+# src exp is >= dst exp; scale src to exp = 0x3fff
+src_exp_ge2:
+	bsr.l		scale_to_zero_src
+	mov.l		%d0,-(%sp)		# save scale factor
+
+	cmpi.b		DTAG(%a6),&DENORM	# is dst denormalized?
+	bne.b		cmpexp22
+	lea		FP_SCR1(%a6),%a0
+	bsr.l		norm			# normalize the denorm; result is new exp
+	neg.w		%d0			# new exp = -(shft val)
+	mov.w		%d0,2+L_SCR1(%a6)	# insert new exp
+
+cmpexp22:
+	mov.w		L_SCR1(%a6),%d0
+	subi.w		&mantissalen+2,%d0	# subtract mantissalen+2 from larger exp
+
+	cmp.w		%d0,2+L_SCR1(%a6)	# is difference >= len(mantissa)+2?
+	bge.b		quick_scale22
+
+	mov.w		2+L_SCR1(%a6),%d0
+	add.w		0x2(%sp),%d0		# scale dst exponent by scale factor
+	mov.w		FP_SCR1_EX(%a6),%d1
+	andi.w		&0x8000,%d1
+	or.w		%d1,%d0			# concat {sgn,new exp}
+	mov.w		%d0,FP_SCR1_EX(%a6)	# insert new dst exponent
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+quick_scale22:
+	andi.w		&0x8000,FP_SCR1_EX(%a6)	# zero dst exponent
+	bset		&0x0,1+FP_SCR1_EX(%a6)	# set exp = 1
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+##########################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	scale_to_zero_src(): scale the exponent of extended precision	#
+#			     value at FP_SCR0(a6).			#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize the mantissa if the operand was a DENORM	#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SCR0(a6) = extended precision operand to be scaled		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR0(a6) = scaled extended precision operand			#
+#	d0	    = scale value					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Set the exponent of the input operand to 0x3fff. Save the value	#
+# of the difference between the original and new exponent. Then,	#
+# normalize the operand if it was a DENORM. Add this normalization	#
+# value to the previous value. Return the result.			#
+#									#
+#########################################################################
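+#
+# in C terms (illustrative), the scale value is the distance from the
+# operand's biased exponent to the 0x3fff bias point; a DENORM is first
+# normalized, and its shift count folds into that distance:
+#
+#	int scale_to_zero(unsigned short *ex)	/* {sgn,exp} word */
+#	{
+#		int exp = *ex & 0x7fff;		/* biased exponent */
+#		*ex = (*ex & 0x8000) | 0x3fff;	/* force true exp = 0 */
+#		return 0x3fff - exp;		/* scale = BIAS - exp */
+#	}
+#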
+
+	global		scale_to_zero_src
+scale_to_zero_src:
+	mov.w		FP_SCR0_EX(%a6),%d1	# extract operand's {sgn,exp}
+	mov.w		%d1,%d0			# make a copy
+
+	andi.l		&0x7fff,%d1		# extract operand's exponent
+
+	andi.w		&0x8000,%d0		# extract operand's sgn
+	or.w		&0x3fff,%d0		# insert new operand's exponent(=0)
+
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert biased exponent
+
+	cmpi.b		STAG(%a6),&DENORM	# is operand a DENORM?
+	beq.b		stzs_denorm		# normalize the DENORM
+
+stzs_norm:
+	mov.l		&0x3fff,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+
+	rts
+
+stzs_denorm:
+	lea		FP_SCR0(%a6),%a0	# pass ptr to src op
+	bsr.l		norm			# normalize denorm
+	neg.l		%d0			# new exponent = -(shft val)
+	mov.l		%d0,%d1			# prepare for op_norm call
+	bra.b		stzs_norm		# finish scaling
+
+###
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	scale_sqrt(): scale the input operand exponent so a subsequent	#
+#		      fsqrt operation won't take an exception.		#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize the mantissa if the operand was a DENORM	#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SCR0(a6) = extended precision operand to be scaled		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR0(a6) = scaled extended precision operand			#
+#	d0	    = scale value					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If the input operand is a DENORM, normalize it.			#
+#	If the exponent of the input operand is even, set the exponent	#
+# to 0x3ffe and return a scale factor of "(0x3ffe-exp)/2". If the	#
+# exponent of the input operand is odd, set the exponent to 0x3fff	#
+# and return a scale factor of "(0x3fff-exp)/2".			#
+#									#
+#########################################################################
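+#
+# a small C sketch of the even/odd split (illustrative):
+#
+#	/* pick the bias point matching the exponent's parity so that
+#	 * the scale factor divides evenly by two */
+#	int scale_sqrt_exp(unsigned short *ex)	/* {sgn,exp} word */
+#	{
+#		int exp  = *ex & 0x7fff;
+#		int bias = (exp & 1) ? 0x3fff : 0x3ffe;
+#		*ex = (*ex & 0x8000) | bias;
+#		return (bias - exp) / 2;	/* sqrt halves the exponent */
+#	}
+#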
+
+	global		scale_sqrt
+scale_sqrt:
+	cmpi.b		STAG(%a6),&DENORM	# is operand a DENORM?
+	beq.b		ss_denorm		# normalize the DENORM
+
+	mov.w		FP_SCR0_EX(%a6),%d1	# extract operand's {sgn,exp}
+	andi.l		&0x7fff,%d1		# extract operand's exponent
+
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# extract operand's sgn
+
+	btst		&0x0,%d1		# is exp even or odd?
+	beq.b		ss_norm_even
+
+	ori.w		&0x3fff,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	mov.l		&0x3fff,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+ss_norm_even:
+	ori.w		&0x3ffe,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	mov.l		&0x3ffe,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+ss_denorm:
+	lea		FP_SCR0(%a6),%a0	# pass ptr to src op
+	bsr.l		norm			# normalize denorm
+
+	btst		&0x0,%d0		# is exp even or odd?
+	beq.b		ss_denorm_even
+
+	ori.w		&0x3fff,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	add.l		&0x3fff,%d0
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+ss_denorm_even:
+	ori.w		&0x3ffe,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	add.l		&0x3ffe,%d0
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+###
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	scale_to_zero_dst(): scale the exponent of extended precision	#
+#			     value at FP_SCR1(a6).			#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize the mantissa if the operand was a DENORM	#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SCR1(a6) = extended precision operand to be scaled		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR1(a6) = scaled extended precision operand			#
+#	d0	    = scale value					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Set the exponent of the input operand to 0x3fff. Save the value	#
+# of the difference between the original and new exponent. Then,	#
+# normalize the operand if it was a DENORM. Add this normalization	#
+# value to the previous value. Return the result.			#
+#									#
+#########################################################################
+
+	global		scale_to_zero_dst
+scale_to_zero_dst:
+	mov.w		FP_SCR1_EX(%a6),%d1	# extract operand's {sgn,exp}
+	mov.w		%d1,%d0			# make a copy
+
+	andi.l		&0x7fff,%d1		# extract operand's exponent
+
+	andi.w		&0x8000,%d0		# extract operand's sgn
+	or.w		&0x3fff,%d0		# insert new operand's exponent(=0)
+
+	mov.w		%d0,FP_SCR1_EX(%a6)	# insert biased exponent
+
+	cmpi.b		DTAG(%a6),&DENORM	# is operand a DENORM?
+	beq.b		stzd_denorm		# normalize the DENORM
+
+stzd_norm:
+	mov.l		&0x3fff,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+	rts
+
+stzd_denorm:
+	lea		FP_SCR1(%a6),%a0	# pass ptr to dst op
+	bsr.l		norm			# normalize denorm
+	neg.l		%d0			# new exponent = -(shft val)
+	mov.l		%d0,%d1			# prepare for op_norm call
+	bra.b		stzd_norm		# finish scaling
+
+##########################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	res_qnan(): return default result w/ QNAN operand for dyadic	#
+#	res_snan(): return default result w/ SNAN operand for dyadic	#
+#	res_qnan_1op(): return dflt result w/ QNAN operand for monadic	#
+#	res_snan_1op(): return dflt result w/ SNAN operand for monadic	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SRC(a6) = pointer to extended precision src operand		#
+#	FP_DST(a6) = pointer to extended precision dst operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If either operand (but not both operands) of an operation is a	#
+# nonsignalling NAN, then that NAN is returned as the result. If both	#
+# operands are nonsignalling NANs, then the destination operand		#
+# nonsignalling NAN is returned as the result.				#
+#	If either operand to an operation is a signalling NAN (SNAN),	#
+# then, the SNAN bit is set in the FPSR EXC byte. If the SNAN trap	#
+# enable bit is set in the FPCR, then the trap is taken and the		#
+# destination is not modified. If the SNAN trap enable bit is not set,	#
+# then the SNAN is converted to a nonsignalling NAN (by setting the	#
+# SNAN bit in the operand to one), and the operation continues as	#
+# described in the preceding paragraph, for nonsignalling NANs.		#
+#	Make sure the appropriate FPSR bits are set before exiting.	#
+#									#
+#########################################################################
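+#
+# the selection rule above, in C form (illustrative; per the bset &0x6
+# below, bit 62 of the 64-bit mantissa is the signalling bit):
+#
+#	typedef unsigned long long u64;
+#
+#	static u64 quiet(u64 man) { return man | (1ULL << 62); }
+#
+#	/* mantissa of the NAN delivered for a dyadic op, given that at
+#	 * least one input is a NAN: the dst NAN wins if both are */
+#	static u64 pick_nan(u64 src_man, u64 dst_man, int dst_is_nan)
+#	{
+#		return quiet(dst_is_nan ? dst_man : src_man);
+#	}
+#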
+
+	global		res_qnan
+	global		res_snan
+res_qnan:
+res_snan:
+	cmp.b		DTAG(%a6), &SNAN	# is the dst an SNAN?
+	beq.b		dst_snan2
+	cmp.b		DTAG(%a6), &QNAN	# is the dst a  QNAN?
+	beq.b		dst_qnan2
+src_nan:
+	cmp.b		STAG(%a6), &QNAN
+	beq.b		src_qnan2
+	global		res_snan_1op
+res_snan_1op:
+src_snan2:
+	bset		&0x6, FP_SRC_HI(%a6)	# set SNAN bit
+	or.l		&nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
+	lea		FP_SRC(%a6), %a0
+	bra.b		nan_comp
+	global		res_qnan_1op
+res_qnan_1op:
+src_qnan2:
+	or.l		&nan_mask, USER_FPSR(%a6)
+	lea		FP_SRC(%a6), %a0
+	bra.b		nan_comp
+dst_snan2:
+	or.l		&nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
+	bset		&0x6, FP_DST_HI(%a6)	# set SNAN bit
+	lea		FP_DST(%a6), %a0
+	bra.b		nan_comp
+dst_qnan2:
+	lea		FP_DST(%a6), %a0
+	cmp.b		STAG(%a6), &SNAN
+	bne		nan_done
+	or.l		&aiop_mask+snan_mask, USER_FPSR(%a6)
+nan_done:
+	or.l		&nan_mask, USER_FPSR(%a6)
+nan_comp:
+	btst		&0x7, FTEMP_EX(%a0)	# is NAN neg?
+	beq.b		nan_not_neg
+	or.l		&neg_mask, USER_FPSR(%a6)
+nan_not_neg:
+	fmovm.x		(%a0), &0x80
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	res_operr(): return default result during operand error		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default operand error result				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	A nonsignalling NAN is returned as the default result when	#
+# an operand error occurs for the following cases:			#
+#									#
+#	Multiply: (Infinity x Zero)					#
+#	Divide  : (Zero / Zero) || (Infinity / Infinity)		#
+#									#
+#########################################################################
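+#
+# the default result is the all-ones nonsignalling NAN kept at
+# nan_return below; as C data it would be (illustrative):
+#
+#	/* ext precision: {sgn,exp} = 0x7fff, 64-bit mantissa all ones */
+#	static const unsigned long operr_nan[3] =
+#		{ 0x7fff0000ul, 0xfffffffful, 0xfffffffful };
+#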
+
+	global		res_operr
+res_operr:
+	or.l		&nan_mask+operr_mask+aiop_mask, USER_FPSR(%a6)
+	fmovm.x		nan_return(%pc), &0x80
+	rts
+
+nan_return:
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+
+#########################################################################
+# fdbcc(): routine to emulate the fdbcc instruction			#
+#									#
+# XDEF **************************************************************** #
+#	_fdbcc()							#
+#									#
+# XREF **************************************************************** #
+#	fetch_dreg() - fetch Dn value					#
+#	store_dreg_l() - store updated Dn value				#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = displacement						#
+#									#
+# OUTPUT ************************************************************** #
+#	none								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This routine checks which conditional predicate is specified by	#
+# the stacked fdbcc instruction opcode and then branches to a routine	#
+# for that predicate. The corresponding fbcc instruction is then used	#
+# to see whether the condition (specified by the stacked FPSR) is true	#
+# or false.								#
+#	If a BSUN exception should be indicated, the BSUN and ABSUN	#
+# bits are set in the stacked FPSR. If the BSUN exception is enabled,	#
+# the fbsun_flg is set in the SPCOND_FLG location on the stack. If an	#
+# enabled BSUN should not be flagged and the predicate is true, then	#
+# Dn is fetched and decremented by one. If Dn is not equal to -1, add	#
+# the displacement value to the stacked PC so that when an "rte" is	#
+# finally executed, the branch occurs.					#
+#									#
+#########################################################################
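+#
+# in outline (a C sketch; "predicate" stands for whichever fbcc test the
+# table below dispatches to, and the 6-byte length/+4 displacement base
+# are the fdbcc encoding assumed here):
+#
+#	/* fdbcc: on a false predicate, decrement Dn.w; the branch target
+#	 * is (instruction address + 4) + disp, i.e. relative to the word
+#	 * following the conditional predicate word */
+#	long fdbcc(short *dn, long instr_pc, short disp, int predicate)
+#	{
+#		if (!predicate && --(*dn) != -1)
+#			return instr_pc + 4 + disp;	/* take the branch */
+#		return instr_pc + 6;		/* fall through (6-byte op) */
+#	}
+#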
+	global		_fdbcc
+_fdbcc:
+	mov.l		%d0,L_SCR1(%a6)		# save displacement
+
+	mov.w		EXC_CMDREG(%a6),%d0	# fetch predicate
+
+	clr.l		%d1			# clear scratch reg
+	mov.b		FPSR_CC(%a6),%d1	# fetch fp ccodes
+	ror.l		&0x8,%d1		# rotate to top byte
+	fmov.l		%d1,%fpsr		# insert into FPSR
+
+	mov.w		(tbl_fdbcc.b,%pc,%d0.w*2),%d1 # load table
+	jmp		(tbl_fdbcc.b,%pc,%d1.w) # jump to fdbcc routine
+
+tbl_fdbcc:
+	short		fdbcc_f		-	tbl_fdbcc	# 00
+	short		fdbcc_eq	-	tbl_fdbcc	# 01
+	short		fdbcc_ogt	-	tbl_fdbcc	# 02
+	short		fdbcc_oge	-	tbl_fdbcc	# 03
+	short		fdbcc_olt	-	tbl_fdbcc	# 04
+	short		fdbcc_ole	-	tbl_fdbcc	# 05
+	short		fdbcc_ogl	-	tbl_fdbcc	# 06
+	short		fdbcc_or	-	tbl_fdbcc	# 07
+	short		fdbcc_un	-	tbl_fdbcc	# 08
+	short		fdbcc_ueq	-	tbl_fdbcc	# 09
+	short		fdbcc_ugt	-	tbl_fdbcc	# 10
+	short		fdbcc_uge	-	tbl_fdbcc	# 11
+	short		fdbcc_ult	-	tbl_fdbcc	# 12
+	short		fdbcc_ule	-	tbl_fdbcc	# 13
+	short		fdbcc_neq	-	tbl_fdbcc	# 14
+	short		fdbcc_t		-	tbl_fdbcc	# 15
+	short		fdbcc_sf	-	tbl_fdbcc	# 16
+	short		fdbcc_seq	-	tbl_fdbcc	# 17
+	short		fdbcc_gt	-	tbl_fdbcc	# 18
+	short		fdbcc_ge	-	tbl_fdbcc	# 19
+	short		fdbcc_lt	-	tbl_fdbcc	# 20
+	short		fdbcc_le	-	tbl_fdbcc	# 21
+	short		fdbcc_gl	-	tbl_fdbcc	# 22
+	short		fdbcc_gle	-	tbl_fdbcc	# 23
+	short		fdbcc_ngle	-	tbl_fdbcc	# 24
+	short		fdbcc_ngl	-	tbl_fdbcc	# 25
+	short		fdbcc_nle	-	tbl_fdbcc	# 26
+	short		fdbcc_nlt	-	tbl_fdbcc	# 27
+	short		fdbcc_nge	-	tbl_fdbcc	# 28
+	short		fdbcc_ngt	-	tbl_fdbcc	# 29
+	short		fdbcc_sneq	-	tbl_fdbcc	# 30
+	short		fdbcc_st	-	tbl_fdbcc	# 31
+
+#########################################################################
+#									#
+# IEEE Nonaware tests							#
+#									#
+# For the IEEE nonaware tests, only the false branch changes the	#
+# counter. However, the true branch may set bsun so we check to see	#
+# if the NAN bit is set, in which case BSUN and AIOP will be set.	#
+#									#
+# The cases EQ and NE are shared by the Aware and Nonaware groups	#
+# and are incapable of setting the BSUN exception bit.			#
+#									#
+# Typically, only one of the two possible branch directions could	#
+# have the NAN bit set.							#
+# (This is assuming the mutual exclusiveness of FPSR cc bit groupings	#
+#  is preserved.)							#
+#									#
+#########################################################################
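+#
+# each nonaware test below follows one pattern; as a C sketch
+# (illustrative mask values: NAN is bit 0 of the cc byte, BSUN enable
+# is bit 7 of the FPCR enable byte):
+#
+#	enum { CC_NAN = 0x01, EN_BSUN = 0x80 };
+#
+#	/* returns nonzero when an enabled BSUN trap must be taken; the
+#	 * caller records BSUN/AIOP in the status byte either way */
+#	static int bsun_check(unsigned cc, unsigned enable)
+#	{
+#		return (cc & CC_NAN) && (enable & EN_BSUN);
+#	}
+#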
+
+#
+# equal:
+#
+#	Z
+#
+fdbcc_eq:
+	fbeq.w		fdbcc_eq_yes		# equal?
+fdbcc_eq_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_eq_yes:
+	rts
+
+#
+# not equal:
+#	_
+#	Z
+#
+fdbcc_neq:
+	fbneq.w		fdbcc_neq_yes		# not equal?
+fdbcc_neq_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_neq_yes:
+	rts
+
+#
+# greater than:
+#	_______
+#	NANvZvN
+#
+fdbcc_gt:
+	fbgt.w		fdbcc_gt_yes		# greater than?
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fdbcc_false		# no;go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_gt_yes:
+	rts					# do nothing
+
+#
+# not greater than:
+#
+#	NANvZvN
+#
+fdbcc_ngt:
+	fbngt.w		fdbcc_ngt_yes		# not greater than?
+fdbcc_ngt_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ngt_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		fdbcc_ngt_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_ngt_done:
+	rts					# no; do nothing
+
+#
+# greater than or equal:
+#	   _____
+#	Zv(NANvN)
+#
+fdbcc_ge:
+	fbge.w		fdbcc_ge_yes		# greater than or equal?
+fdbcc_ge_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fdbcc_false		# no;go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ge_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		fdbcc_ge_yes_done	# no;go do nothing
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_ge_yes_done:
+	rts					# do nothing
+
+#
+# not (greater than or equal):
+#	       _
+#	NANv(N^Z)
+#
+fdbcc_nge:
+	fbnge.w		fdbcc_nge_yes		# not (greater than or equal)?
+fdbcc_nge_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_nge_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		fdbcc_nge_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_nge_done:
+	rts					# no; do nothing
+
+#
+# less than:
+#	   _____
+#	N^(NANvZ)
+#
+fdbcc_lt:
+	fblt.w		fdbcc_lt_yes		# less than?
+fdbcc_lt_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fdbcc_false		# no; go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_lt_yes:
+	rts					# do nothing
+
+#
+# not less than:
+#	       _
+#	NANv(ZvN)
+#
+fdbcc_nlt:
+	fbnlt.w		fdbcc_nlt_yes		# not less than?
+fdbcc_nlt_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_nlt_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		fdbcc_nlt_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_nlt_done:
+	rts					# no; do nothing
+
+#
+# less than or equal:
+#	     ___
+#	Zv(N^NAN)
+#
+fdbcc_le:
+	fble.w		fdbcc_le_yes		# less than or equal?
+fdbcc_le_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fdbcc_false		# no; go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_le_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		fdbcc_le_yes_done	# no; go do nothing
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_le_yes_done:
+	rts					# do nothing
+
+#
+# not (less than or equal):
+#	     ___
+#	NANv(NvZ)
+#
+fdbcc_nle:
+	fbnle.w		fdbcc_nle_yes		# not (less than or equal)?
+fdbcc_nle_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_nle_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fdbcc_nle_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_nle_done:
+	rts					# no; do nothing
+
+#
+# greater or less than:
+#	_____
+#	NANvZ
+#
+fdbcc_gl:
+	fbgl.w		fdbcc_gl_yes		# greater or less than?
+fdbcc_gl_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fdbcc_false		# no; handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_gl_yes:
+	rts					# do nothing
+
+#
+# not (greater or less than):
+#
+#	NANvZ
+#
+fdbcc_ngl:
+	fbngl.w		fdbcc_ngl_yes		# not (greater or less than)?
+fdbcc_ngl_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ngl_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		fdbcc_ngl_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_ngl_done:
+	rts					# no; do nothing
+
+#
+# greater, less, or equal:
+#	___
+#	NAN
+#
+fdbcc_gle:
+	fbgle.w		fdbcc_gle_yes		# greater, less, or equal?
+fdbcc_gle_no:
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_gle_yes:
+	rts					# do nothing
+
+#
+# not (greater, less, or equal):
+#
+#	NAN
+#
+fdbcc_ngle:
+	fbngle.w	fdbcc_ngle_yes		# not (greater, less, or equal)?
+fdbcc_ngle_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ngle_yes:
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	rts					# no; do nothing
+
+#########################################################################
+#									#
+# Miscellaneous tests							#
+#									#
+# For the IEEE miscellaneous tests, all but fdbf and fdbt can set bsun. #
+#									#
+#########################################################################
+
+#
+# false:
+#
+#	False
+#
+fdbcc_f:					# no bsun possible
+	bra.w		fdbcc_false		# go handle counter
+
+#
+# true:
+#
+#	True
+#
+fdbcc_t:					# no bsun possible
+	rts					# do nothing
+
+#
+# signalling false:
+#
+#	False
+#
+fdbcc_sf:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fdbcc_false		# no;go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# go handle counter
+
+#
+# signalling true:
+#
+#	True
+#
+fdbcc_st:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.b		fdbcc_st_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_st_done:
+	rts
+
+#
+# signalling equal:
+#
+#	Z
+#
+fdbcc_seq:
+	fbseq.w		fdbcc_seq_yes		# signalling equal?
+fdbcc_seq_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fdbcc_false		# no;go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# go handle counter
+fdbcc_seq_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.b		fdbcc_seq_yes_done	# no;go do nothing
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_seq_yes_done:
+	rts					# yes; do nothing
+
+#
+# signalling not equal:
+#	_
+#	Z
+#
+fdbcc_sneq:
+	fbsneq.w	fdbcc_sneq_yes		# signalling not equal?
+fdbcc_sneq_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fdbcc_false		# no;go handle counter
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+	bra.w		fdbcc_false		# go handle counter
+fdbcc_sneq_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+	beq.w		fdbcc_sneq_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+	bne.w		fdbcc_bsun		# yes; we have an exception
+fdbcc_sneq_done:
+	rts
+
+#########################################################################
+#									#
+# IEEE Aware tests							#
+#									#
+# For the IEEE aware tests, action is only taken if the result is false.#
+# Therefore, the opposite branch type is used to jump to the decrement	#
+# routine.								#
+# The BSUN exception will not be set for any of these tests.		#
+#									#
+#########################################################################
+
+#
+# ordered greater than:
+#	_______
+#	NANvZvN
+#
+fdbcc_ogt:
+	fbogt.w		fdbcc_ogt_yes		# ordered greater than?
+fdbcc_ogt_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ogt_yes:
+	rts					# yes; do nothing
+
+#
+# unordered or less or equal:
+#	_______
+#	NANvZvN
+#
+fdbcc_ule:
+	fbule.w		fdbcc_ule_yes		# unordered or less or equal?
+fdbcc_ule_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ule_yes:
+	rts					# yes; do nothing
+
+#
+# ordered greater than or equal:
+#	   _____
+#	Zv(NANvN)
+#
+fdbcc_oge:
+	fboge.w		fdbcc_oge_yes		# ordered greater than or equal?
+fdbcc_oge_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_oge_yes:
+	rts					# yes; do nothing
+
+#
+# unordered or less than:
+#	       _
+#	NANv(N^Z)
+#
+fdbcc_ult:
+	fbult.w		fdbcc_ult_yes		# unordered or less than?
+fdbcc_ult_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ult_yes:
+	rts					# yes; do nothing
+
+#
+# ordered less than:
+#	   _____
+#	N^(NANvZ)
+#
+fdbcc_olt:
+	fbolt.w		fdbcc_olt_yes		# ordered less than?
+fdbcc_olt_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_olt_yes:
+	rts					# yes; do nothing
+
+#
+# unordered or greater or equal:
+#
+#	NANvZvN
+#
+fdbcc_uge:
+	fbuge.w		fdbcc_uge_yes		# unordered or greater or equal?
+fdbcc_uge_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_uge_yes:
+	rts					# yes; do nothing
+
+#
+# ordered less than or equal:
+#	     ___
+#	Zv(N^NAN)
+#
+fdbcc_ole:
+	fbole.w		fdbcc_ole_yes		# ordered less than or equal?
+fdbcc_ole_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ole_yes:
+	rts					# yes; do nothing
+
+#
+# unordered or greater than:
+#	     ___
+#	NANv(NvZ)
+#
+fdbcc_ugt:
+	fbugt.w		fdbcc_ugt_yes		# unordered or greater than?
+fdbcc_ugt_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ugt_yes:
+	rts					# yes; do nothing
+
+#
+# ordered greater or less than:
+#	_____
+#	NANvZ
+#
+fdbcc_ogl:
+	fbogl.w		fdbcc_ogl_yes		# ordered greater or less than?
+fdbcc_ogl_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ogl_yes:
+	rts					# yes; do nothing
+
+#
+# unordered or equal:
+#
+#	NANvZ
+#
+fdbcc_ueq:
+	fbueq.w		fdbcc_ueq_yes		# unordered or equal?
+fdbcc_ueq_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_ueq_yes:
+	rts					# yes; do nothing
+
+#
+# ordered:
+#	___
+#	NAN
+#
+fdbcc_or:
+	fbor.w		fdbcc_or_yes		# ordered?
+fdbcc_or_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_or_yes:
+	rts					# yes; do nothing
+
+#
+# unordered:
+#
+#	NAN
+#
+fdbcc_un:
+	fbun.w		fdbcc_un_yes		# unordered?
+fdbcc_un_no:
+	bra.w		fdbcc_false		# no; go handle counter
+fdbcc_un_yes:
+	rts					# yes; do nothing
+
+#######################################################################
+
+#
+# the bsun exception bit was not set.
+#
+# (1) subtract 1 from the count register
+# (2) if (cr == -1) then
+#	pc = pc of next instruction
+#     else
+#	pc += sign_ext(16-bit displacement)
+#
+fdbcc_false:
+	mov.b		1+EXC_OPWORD(%a6), %d1	# fetch lo opword
+	andi.w		&0x7, %d1		# extract count register
+
+	bsr.l		fetch_dreg		# fetch count value
+# make sure that d0 isn't corrupted between calls...
+
+	subq.w		&0x1, %d0		# Dn - 1 -> Dn
+
+	bsr.l		store_dreg_l		# store new count value
+
+	cmpi.w		%d0, &-0x1		# is (Dn == -1)?
+	bne.b		fdbcc_false_cont	# no;
+	rts
+
+fdbcc_false_cont:
+	mov.l		L_SCR1(%a6),%d0		# fetch displacement
+	add.l		USER_FPIAR(%a6),%d0	# add instruction PC
+	addq.l		&0x4,%d0		# add instruction length
+	mov.l		%d0,EXC_PC(%a6)		# set new PC
+	rts
+
+# the emulation routine set bsun and BSUN was enabled. have to
+# fix stack and jump to the bsun handler.
+# let the caller of this routine shift the stack frame up to
+# eliminate the effective address field.
+fdbcc_bsun:
+	mov.b		&fbsun_flg,SPCOND_FLG(%a6)
+	rts
+
+#########################################################################
+# ftrapcc(): routine to emulate the ftrapcc instruction			#
+#									#
+# XDEF ****************************************************************	#
+#	_ftrapcc()							#
+#									#
+# XREF ****************************************************************	#
+#	none								#
+#									#
+# INPUT *************************************************************** #
+#	none								#
+#									#
+# OUTPUT ************************************************************** #
+#	none								#
+#									#
+# ALGORITHM *********************************************************** #
+#	This routine checks which conditional predicate is specified by	#
+# the stacked ftrapcc instruction opcode and then branches to a routine	#
+# for that predicate. The corresponding fbcc instruction is then used	#
+# to see whether the condition (specified by the stacked FPSR) is true	#
+# or false.								#
+#	If a BSUN exception should be indicated, the BSUN and ABSUN	#
+# bits are set in the stacked FPSR. If the BSUN exception is enabled,	#
+# the fbsun_flg is set in the SPCOND_FLG location on the stack. If an	#
+# enabled BSUN should not be flagged and the predicate is true, then	#
+# the ftrapcc_flg is set in the SPCOND_FLG location. These special	#
+# flags indicate to the calling routine to emulate the exceptional	#
+# condition.								#
+#									#
+#########################################################################
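+#
+# unlike fdbcc, nothing is branched here: the routine only records, via
+# SPCOND_FLG, what the caller must emulate next. a C sketch with
+# illustrative flag names standing in for fbsun_flg/ftrapcc_flg:
+#
+#	enum spcond { SP_NONE, SP_BSUN, SP_TRAP };
+#
+#	/* decide which special condition the caller has to emulate */
+#	enum spcond ftrapcc(int predicate, int bsun_trap)
+#	{
+#		if (bsun_trap)
+#			return SP_BSUN;		/* enabled BSUN on a NAN */
+#		return predicate ? SP_TRAP : SP_NONE;
+#	}
+#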
+
+	global		_ftrapcc
+_ftrapcc:
+	mov.w		EXC_CMDREG(%a6),%d0	# fetch predicate
+
+	clr.l		%d1			# clear scratch reg
+	mov.b		FPSR_CC(%a6),%d1	# fetch fp ccodes
+	ror.l		&0x8,%d1		# rotate to top byte
+	fmov.l		%d1,%fpsr		# insert into FPSR
+
+	mov.w		(tbl_ftrapcc.b,%pc,%d0.w*2), %d1 # load table
+	jmp		(tbl_ftrapcc.b,%pc,%d1.w) # jump to ftrapcc routine
+
+tbl_ftrapcc:
+	short		ftrapcc_f	-	tbl_ftrapcc	# 00
+	short		ftrapcc_eq	-	tbl_ftrapcc	# 01
+	short		ftrapcc_ogt	-	tbl_ftrapcc	# 02
+	short		ftrapcc_oge	-	tbl_ftrapcc	# 03
+	short		ftrapcc_olt	-	tbl_ftrapcc	# 04
+	short		ftrapcc_ole	-	tbl_ftrapcc	# 05
+	short		ftrapcc_ogl	-	tbl_ftrapcc	# 06
+	short		ftrapcc_or	-	tbl_ftrapcc	# 07
+	short		ftrapcc_un	-	tbl_ftrapcc	# 08
+	short		ftrapcc_ueq	-	tbl_ftrapcc	# 09
+	short		ftrapcc_ugt	-	tbl_ftrapcc	# 10
+	short		ftrapcc_uge	-	tbl_ftrapcc	# 11
+	short		ftrapcc_ult	-	tbl_ftrapcc	# 12
+	short		ftrapcc_ule	-	tbl_ftrapcc	# 13
+	short		ftrapcc_neq	-	tbl_ftrapcc	# 14
+	short		ftrapcc_t	-	tbl_ftrapcc	# 15
+	short		ftrapcc_sf	-	tbl_ftrapcc	# 16
+	short		ftrapcc_seq	-	tbl_ftrapcc	# 17
+	short		ftrapcc_gt	-	tbl_ftrapcc	# 18
+	short		ftrapcc_ge	-	tbl_ftrapcc	# 19
+	short		ftrapcc_lt	-	tbl_ftrapcc	# 20
+	short		ftrapcc_le	-	tbl_ftrapcc	# 21
+	short		ftrapcc_gl	-	tbl_ftrapcc	# 22
+	short		ftrapcc_gle	-	tbl_ftrapcc	# 23
+	short		ftrapcc_ngle	-	tbl_ftrapcc	# 24
+	short		ftrapcc_ngl	-	tbl_ftrapcc	# 25
+	short		ftrapcc_nle	-	tbl_ftrapcc	# 26
+	short		ftrapcc_nlt	-	tbl_ftrapcc	# 27
+	short		ftrapcc_nge	-	tbl_ftrapcc	# 28
+	short		ftrapcc_ngt	-	tbl_ftrapcc	# 29
+	short		ftrapcc_sneq	-	tbl_ftrapcc	# 30
+	short		ftrapcc_st	-	tbl_ftrapcc	# 31
+
+#########################################################################
+#									#
+# IEEE Nonaware tests							#
+#									#
+# For the IEEE nonaware tests, we set the result based on the		#
+# floating point condition codes. In addition, we check to see		#
+# if the NAN bit is set, in which case BSUN and AIOP will be set.	#
+#									#
+# The cases EQ and NE are shared by the Aware and Nonaware groups	#
+# and are incapable of setting the BSUN exception bit.			#
+#									#
+# Typically, only one of the two possible branch directions could	#
+# have the NAN bit set.							#
+#									#
+#########################################################################
+
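+# example: a compare involving a NaN operand sets the NAN condition
+# code. a nonaware predicate such as "gt" then evaluates false, BSUN
+# and AIOP are set in the stacked FPSR, and, if BSUN is enabled in the
+# FPCR, the bsun handler is taken instead (see ftrapcc_bsun below).
+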
+#
+# equal:
+#
+#	Z
+#
+ftrapcc_eq:
+	fbeq.w		ftrapcc_trap		# equal?
+ftrapcc_eq_no:
+	rts					# do nothing
+
+#
+# not equal:
+#	_
+#	Z
+#
+ftrapcc_neq:
+	fbneq.w		ftrapcc_trap		# not equal?
+ftrapcc_neq_no:
+	rts					# do nothing
+
+#
+# greater than:
+#	_______
+#	NANvZvN
+#
+ftrapcc_gt:
+	fbgt.w		ftrapcc_trap		# greater than?
+ftrapcc_gt_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		ftrapcc_gt_done		# no
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_gt_done:
+	rts					# no; do nothing
+
+#
+# not greater than:
+#
+#	NANvZvN
+#
+ftrapcc_ngt:
+	fbngt.w		ftrapcc_ngt_yes		# not greater than?
+ftrapcc_ngt_no:
+	rts					# do nothing
+ftrapcc_ngt_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# greater than or equal:
+#	   _____
+#	Zv(NANvN)
+#
+ftrapcc_ge:
+	fbge.w		ftrapcc_ge_yes		# greater than or equal?
+ftrapcc_ge_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		ftrapcc_ge_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_ge_done:
+	rts					# no; do nothing
+ftrapcc_ge_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# not (greater than or equal):
+#	       _
+#	NANv(N^Z)
+#
+ftrapcc_nge:
+	fbnge.w		ftrapcc_nge_yes		# not (greater than or equal)?
+ftrapcc_nge_no:
+	rts					# do nothing
+ftrapcc_nge_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# less than:
+#	   _____
+#	N^(NANvZ)
+#
+ftrapcc_lt:
+	fblt.w		ftrapcc_trap		# less than?
+ftrapcc_lt_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		ftrapcc_lt_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_lt_done:
+	rts					# no; do nothing
+
+#
+# not less than:
+#	       _
+#	NANv(ZvN)
+#
+ftrapcc_nlt:
+	fbnlt.w		ftrapcc_nlt_yes		# not less than?
+ftrapcc_nlt_no:
+	rts					# do nothing
+ftrapcc_nlt_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# less than or equal:
+#	     ___
+#	Zv(N^NAN)
+#
+ftrapcc_le:
+	fble.w		ftrapcc_le_yes		# less than or equal?
+ftrapcc_le_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		ftrapcc_le_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_le_done:
+	rts					# no; do nothing
+ftrapcc_le_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# not (less than or equal):
+#	     ___
+#	NANv(NvZ)
+#
+ftrapcc_nle:
+	fbnle.w		ftrapcc_nle_yes		# not (less than or equal)?
+ftrapcc_nle_no:
+	rts					# do nothing
+ftrapcc_nle_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# greater or less than:
+#	_____
+#	NANvZ
+#
+ftrapcc_gl:
+	fbgl.w		ftrapcc_trap		# greater or less than?
+ftrapcc_gl_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		ftrapcc_gl_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_gl_done:
+	rts					# no; do nothing
+
+#
+# not (greater or less than):
+#
+#	NANvZ
+#
+ftrapcc_ngl:
+	fbngl.w		ftrapcc_ngl_yes		# not (greater or less than)?
+ftrapcc_ngl_no:
+	rts					# do nothing
+ftrapcc_ngl_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# greater, less, or equal:
+#	___
+#	NAN
+#
+ftrapcc_gle:
+	fbgle.w		ftrapcc_trap		# greater, less, or equal?
+ftrapcc_gle_no:
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	rts					# no; do nothing
+
+#
+# not (greater, less, or equal):
+#
+#	NAN
+#
+ftrapcc_ngle:
+	fbngle.w	ftrapcc_ngle_yes	# not (greater, less, or equal)?
+ftrapcc_ngle_no:
+	rts					# do nothing
+ftrapcc_ngle_yes:
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#########################################################################
+#									#
+# Miscellaneous tests							#
+#									#
+# For the miscellaneous tests, we set the result based on the		#
+# floating point condition codes. The signalling variants (SF, ST,	#
+# SEQ, SNEQ) can also set the BSUN exception bit when the NAN		#
+# condition code bit is set.						#
+#									#
+#########################################################################
+
+#
+# false:
+#
+#	False
+#
+ftrapcc_f:
+	rts					# do nothing
+
+#
+# true:
+#
+#	True
+#
+ftrapcc_t:
+	bra.w		ftrapcc_trap		# go take trap
+
+#
+# signalling false:
+#
+#	False
+#
+ftrapcc_sf:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.b		ftrapcc_sf_done		# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_sf_done:
+	rts					# no; do nothing
+
+#
+# signalling true:
+#
+#	True
+#
+ftrapcc_st:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# signalling equal:
+#
+#	Z
+#
+ftrapcc_seq:
+	fbseq.w		ftrapcc_seq_yes		# signalling equal?
+ftrapcc_seq_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_seq_done	# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_seq_done:
+	rts					# no; do nothing
+ftrapcc_seq_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#
+# signalling not equal:
+#	_
+#	Z
+#
+ftrapcc_sneq:
+	fbsneq.w	ftrapcc_sneq_yes	# signalling not equal?
+ftrapcc_sneq_no:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_sneq_no_done	# no; go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+ftrapcc_sneq_no_done:
+	rts					# do nothing
+ftrapcc_sneq_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		ftrapcc_trap		# no; go take trap
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	btst		&bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		ftrapcc_bsun		# yes
+	bra.w		ftrapcc_trap		# no; go take trap
+
+#########################################################################
+#									#
+# IEEE Aware tests							#
+#									#
+# For the IEEE aware tests, we only have to set the result based on the	#
+# floating point condition codes. The BSUN exception will not be	#
+# set for any of these tests.						#
+#									#
+#########################################################################
+
+#
+# ordered greater than:
+#	_______
+#	NANvZvN
+#
+ftrapcc_ogt:
+	fbogt.w		ftrapcc_trap		# ordered greater than?
+ftrapcc_ogt_no:
+	rts					# do nothing
+
+#
+# unordered or less or equal:
+#
+#	NANvZvN
+#
+ftrapcc_ule:
+	fbule.w		ftrapcc_trap		# unordered or less or equal?
+ftrapcc_ule_no:
+	rts					# do nothing
+
+#
+# ordered greater than or equal:
+#	   _____
+#	Zv(NANvN)
+#
+ftrapcc_oge:
+	fboge.w		ftrapcc_trap		# ordered greater than or equal?
+ftrapcc_oge_no:
+	rts					# do nothing
+
+#
+# unordered or less than:
+#	       _
+#	NANv(N^Z)
+#
+ftrapcc_ult:
+	fbult.w		ftrapcc_trap		# unordered or less than?
+ftrapcc_ult_no:
+	rts					# do nothing
+
+#
+# ordered less than:
+#	   _____
+#	N^(NANvZ)
+#
+ftrapcc_olt:
+	fbolt.w		ftrapcc_trap		# ordered less than?
+ftrapcc_olt_no:
+	rts					# do nothing
+
+#
+# unordered or greater or equal:
+#	      _
+#	NANvZvN
+#
+ftrapcc_uge:
+	fbuge.w		ftrapcc_trap		# unordered or greater or equal?
+ftrapcc_uge_no:
+	rts					# do nothing
+
+#
+# ordered less than or equal:
+#	     ___
+#	Zv(N^NAN)
+#
+ftrapcc_ole:
+	fbole.w		ftrapcc_trap		# ordered less than or equal?
+ftrapcc_ole_no:
+	rts					# do nothing
+
+#
+# unordered or greater than:
+#	     ___
+#	NANv(NvZ)
+#
+ftrapcc_ugt:
+	fbugt.w		ftrapcc_trap		# unordered or greater than?
+ftrapcc_ugt_no:
+	rts					# do nothing
+
+#
+# ordered greater or less than:
+#	_____
+#	NANvZ
+#
+ftrapcc_ogl:
+	fbogl.w		ftrapcc_trap		# ordered greater or less than?
+ftrapcc_ogl_no:
+	rts					# do nothing
+
+#
+# unordered or equal:
+#
+#	NANvZ
+#
+ftrapcc_ueq:
+	fbueq.w		ftrapcc_trap		# unordered or equal?
+ftrapcc_ueq_no:
+	rts					# do nothing
+
+#
+# ordered:
+#	___
+#	NAN
+#
+ftrapcc_or:
+	fbor.w		ftrapcc_trap		# ordered?
+ftrapcc_or_no:
+	rts					# do nothing
+
+#
+# unordered:
+#
+#	NAN
+#
+ftrapcc_un:
+	fbun.w		ftrapcc_trap		# unordered?
+ftrapcc_un_no:
+	rts					# do nothing
+
+#######################################################################
+
+# the bsun exception bit was not set.
+# we will need to jump to the ftrapcc vector. the stack frame
+# is the same size as that of the fp unimp instruction. the
+# only difference is that the <ea> field should hold the PC
+# of the ftrapcc instruction and the vector offset field
+# should denote the ftrapcc trap.
+ftrapcc_trap:
+	mov.b		&ftrapcc_flg,SPCOND_FLG(%a6)
+	rts
+
+# the emulation routine set bsun and BSUN was enabled. have to
+# fix stack and jump to the bsun handler.
+# let the caller of this routine shift the stack frame up to
+# eliminate the effective address field.
+ftrapcc_bsun:
+	mov.b		&fbsun_flg,SPCOND_FLG(%a6)
+	rts
+
+#########################################################################
+# fscc(): routine to emulate the fscc instruction			#
+#									#
+# XDEF **************************************************************** #
+#	_fscc()								#
+#									#
+# XREF **************************************************************** #
+#	store_dreg_b() - store result to data register file		#
+#	dec_areg() - decrement an areg for -(an) mode			#
+#	inc_areg() - increment an areg for (an)+ mode			#
+#	_dmem_write_byte() - store result to memory			#
+#									#
+# INPUT ***************************************************************	#
+#	none								#
+#									#
+# OUTPUT ************************************************************** #
+#	none								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This routine checks which conditional predicate is specified by	#
+# the stacked fscc instruction opcode and then branches to a routine	#
+# for that predicate. The corresponding fbcc instruction is then used	#
+# to see whether the condition (specified by the stacked FPSR) is true	#
+# or false.								#
+#	If a BSUN exception should be indicated, the BSUN and ABSUN	#
+# bits are set in the stacked FPSR. If the BSUN exception is enabled,	#
+# the fbsun_flg is set in the SPCOND_FLG location on the stack. If an	#
+# enabled BSUN should not be flagged and the predicate is true, then	#
+# the result is stored to the data register file or memory		#
+#									#
+#########################################################################
+
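+# in short, this emulates the fscc "set on condition" semantics: the
+# destination byte is set to 0xff if the predicate is true and to 0x00
+# if it is false (the st/clr.b pairs below).
+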
+	global		_fscc
+_fscc:
+	mov.w		EXC_CMDREG(%a6),%d0	# fetch predicate
+
+	clr.l		%d1			# clear scratch reg
+	mov.b		FPSR_CC(%a6),%d1	# fetch fp ccodes
+	ror.l		&0x8,%d1		# rotate to top byte
+	fmov.l		%d1,%fpsr		# insert into FPSR
+
+	mov.w		(tbl_fscc.b,%pc,%d0.w*2),%d1 # load table
+	jmp		(tbl_fscc.b,%pc,%d1.w)	# jump to fscc routine
+
+tbl_fscc:
+	short		fscc_f		-	tbl_fscc	# 00
+	short		fscc_eq		-	tbl_fscc	# 01
+	short		fscc_ogt	-	tbl_fscc	# 02
+	short		fscc_oge	-	tbl_fscc	# 03
+	short		fscc_olt	-	tbl_fscc	# 04
+	short		fscc_ole	-	tbl_fscc	# 05
+	short		fscc_ogl	-	tbl_fscc	# 06
+	short		fscc_or		-	tbl_fscc	# 07
+	short		fscc_un		-	tbl_fscc	# 08
+	short		fscc_ueq	-	tbl_fscc	# 09
+	short		fscc_ugt	-	tbl_fscc	# 10
+	short		fscc_uge	-	tbl_fscc	# 11
+	short		fscc_ult	-	tbl_fscc	# 12
+	short		fscc_ule	-	tbl_fscc	# 13
+	short		fscc_neq	-	tbl_fscc	# 14
+	short		fscc_t		-	tbl_fscc	# 15
+	short		fscc_sf		-	tbl_fscc	# 16
+	short		fscc_seq	-	tbl_fscc	# 17
+	short		fscc_gt		-	tbl_fscc	# 18
+	short		fscc_ge		-	tbl_fscc	# 19
+	short		fscc_lt		-	tbl_fscc	# 20
+	short		fscc_le		-	tbl_fscc	# 21
+	short		fscc_gl		-	tbl_fscc	# 22
+	short		fscc_gle	-	tbl_fscc	# 23
+	short		fscc_ngle	-	tbl_fscc	# 24
+	short		fscc_ngl	-	tbl_fscc	# 25
+	short		fscc_nle	-	tbl_fscc	# 26
+	short		fscc_nlt	-	tbl_fscc	# 27
+	short		fscc_nge	-	tbl_fscc	# 28
+	short		fscc_ngt	-	tbl_fscc	# 29
+	short		fscc_sneq	-	tbl_fscc	# 30
+	short		fscc_st		-	tbl_fscc	# 31
+
+#########################################################################
+#									#
+# IEEE Nonaware tests							#
+#									#
+# For the IEEE nonaware tests, we set the result based on the		#
+# floating point condition codes. In addition, we check to see		#
+# if the NAN bit is set, in which case BSUN and AIOP will be set.	#
+#									#
+# The cases EQ and NE are shared by the Aware and Nonaware groups	#
+# and are incapable of setting the BSUN exception bit.			#
+#									#
+# Typically, only one of the two possible branch directions could	#
+# have the NAN bit set.							#
+#									#
+#########################################################################
+
+#
+# equal:
+#
+#	Z
+#
+fscc_eq:
+	fbeq.w		fscc_eq_yes		# equal?
+fscc_eq_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_eq_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# not equal:
+#	_
+#	Z
+#
+fscc_neq:
+	fbneq.w		fscc_neq_yes		# not equal?
+fscc_neq_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_neq_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# greater than:
+#	_______
+#	NANvZvN
+#
+fscc_gt:
+	fbgt.w		fscc_gt_yes		# greater than?
+fscc_gt_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_gt_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# not greater than:
+#
+#	NANvZvN
+#
+fscc_ngt:
+	fbngt.w		fscc_ngt_yes		# not greater than?
+fscc_ngt_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ngt_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# greater than or equal:
+#	   _____
+#	Zv(NANvN)
+#
+fscc_ge:
+	fbge.w		fscc_ge_yes		# greater than or equal?
+fscc_ge_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_ge_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# not (greater than or equal):
+#	       _
+#	NANv(N^Z)
+#
+fscc_nge:
+	fbnge.w		fscc_nge_yes		# not (greater than or equal)?
+fscc_nge_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_nge_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# less than:
+#	   _____
+#	N^(NANvZ)
+#
+fscc_lt:
+	fblt.w		fscc_lt_yes		# less than?
+fscc_lt_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_lt_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# not less than:
+#	       _
+#	NANv(ZvN)
+#
+fscc_nlt:
+	fbnlt.w		fscc_nlt_yes		# not less than?
+fscc_nlt_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_nlt_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# less than or equal:
+#	     ___
+#	Zv(N^NAN)
+#
+fscc_le:
+	fble.w		fscc_le_yes		# less than or equal?
+fscc_le_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_le_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# not (less than or equal):
+#	     ___
+#	NANv(NvZ)
+#
+fscc_nle:
+	fbnle.w		fscc_nle_yes		# not (less than or equal)?
+fscc_nle_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_nle_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# greater or less than:
+#	_____
+#	NANvZ
+#
+fscc_gl:
+	fbgl.w		fscc_gl_yes		# greater or less than?
+fscc_gl_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_gl_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# not (greater or less than):
+#
+#	NANvZ
+#
+fscc_ngl:
+	fbngl.w		fscc_ngl_yes		# not (greater or less than)?
+fscc_ngl_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ngl_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# greater, less, or equal:
+#	___
+#	NAN
+#
+fscc_gle:
+	fbgle.w		fscc_gle_yes		# greater, less, or equal?
+fscc_gle_no:
+	clr.b		%d0			# set false
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_gle_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# not (greater, less, or equal):
+#
+#	NAN
+#
+fscc_ngle:
+	fbngle.w		fscc_ngle_yes	# not (greater, less, or equal)?
+fscc_ngle_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ngle_yes:
+	st		%d0			# set true
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#########################################################################
+#									#
+# Miscellaneous tests							#
+#									#
+# For the miscellaneous tests, we set the result based on the		#
+# floating point condition codes. The signalling variants (SF, ST,	#
+# SEQ, SNEQ) can also set the BSUN exception bit when the NAN		#
+# condition code bit is set.						#
+#									#
+#########################################################################
+
+#
+# false:
+#
+#	False
+#
+fscc_f:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+
+#
+# true:
+#
+#	True
+#
+fscc_t:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# signalling false:
+#
+#	False
+#
+fscc_sf:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# signalling true:
+#
+#	True
+#
+fscc_st:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# signalling equal:
+#
+#	Z
+#
+fscc_seq:
+	fbseq.w		fscc_seq_yes		# signalling equal?
+fscc_seq_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_seq_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#
+# signalling not equal:
+#	_
+#	Z
+#
+fscc_sneq:
+	fbsneq.w	fscc_sneq_yes		# signalling not equal?
+fscc_sneq_no:
+	clr.b		%d0			# set false
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+fscc_sneq_yes:
+	st		%d0			# set true
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set in cc?
+	beq.w		fscc_done		# no;go finish
+	ori.l		&bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+	bra.w		fscc_chk_bsun		# go finish
+
+#########################################################################
+#									#
+# IEEE Aware tests							#
+#									#
+# For the IEEE aware tests, we only have to set the result based on the	#
+# floating point condition codes. The BSUN exception will not be	#
+# set for any of these tests.						#
+#									#
+#########################################################################
+
+#
+# ordered greater than:
+#	_______
+#	NANvZvN
+#
+fscc_ogt:
+	fbogt.w		fscc_ogt_yes		# ordered greater than?
+fscc_ogt_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ogt_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# unordered or less or equal:
+#
+#	NANvZvN
+#
+fscc_ule:
+	fbule.w		fscc_ule_yes		# unordered or less or equal?
+fscc_ule_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ule_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# ordered greater than or equal:
+#	   _____
+#	Zv(NANvN)
+#
+fscc_oge:
+	fboge.w		fscc_oge_yes		# ordered greater than or equal?
+fscc_oge_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_oge_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# unordered or less than:
+#	       _
+#	NANv(N^Z)
+#
+fscc_ult:
+	fbult.w		fscc_ult_yes		# unordered or less than?
+fscc_ult_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ult_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# ordered less than:
+#	   _____
+#	N^(NANvZ)
+#
+fscc_olt:
+	fbolt.w		fscc_olt_yes		# ordered less than?
+fscc_olt_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_olt_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# unordered or greater or equal:
+#	      _
+#	NANvZvN
+#
+fscc_uge:
+	fbuge.w		fscc_uge_yes		# unordered or greater or equal?
+fscc_uge_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_uge_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# ordered less than or equal:
+#	     ___
+#	Zv(N^NAN)
+#
+fscc_ole:
+	fbole.w		fscc_ole_yes		# ordered less than or equal?
+fscc_ole_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ole_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# unordered or greater than:
+#	     ___
+#	NANv(NvZ)
+#
+fscc_ugt:
+	fbugt.w		fscc_ugt_yes		# unordered or greater than?
+fscc_ugt_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ugt_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# ordered greater or less than:
+#	_____
+#	NANvZ
+#
+fscc_ogl:
+	fbogl.w		fscc_ogl_yes		# ordered greater or less than?
+fscc_ogl_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ogl_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# unordered or equal:
+#
+#	NANvZ
+#
+fscc_ueq:
+	fbueq.w		fscc_ueq_yes		# unordered or equal?
+fscc_ueq_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_ueq_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# ordered:
+#	___
+#	NAN
+#
+fscc_or:
+	fbor.w		fscc_or_yes		# ordered?
+fscc_or_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_or_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#
+# unordered:
+#
+#	NAN
+#
+fscc_un:
+	fbun.w		fscc_un_yes		# unordered?
+fscc_un_no:
+	clr.b		%d0			# set false
+	bra.w		fscc_done		# go finish
+fscc_un_yes:
+	st		%d0			# set true
+	bra.w		fscc_done		# go finish
+
+#######################################################################
+
+#
+# the bsun exception bit was set. now, check to see if BSUN
+# is enabled. if so, don't store the result; instead, correct the
+# stack frame for a bsun exception.
+#
+fscc_chk_bsun:
+	btst		&bsun_bit,FPCR_ENABLE(%a6) # was BSUN set?
+	bne.w		fscc_bsun
+
+#
+# the bsun exception bit was not set.
+# the result has been selected.
+# now, check to see if the result is to be stored in the data register
+# file or in memory.
+#
+fscc_done:
+	mov.l		%d0,%a0			# save result for a moment
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# fetch lo opword
+	mov.l		%d1,%d0			# make a copy
+	andi.b		&0x38,%d1		# extract src mode
+
+	bne.b		fscc_mem_op		# it's a memory operation
+
+	mov.l		%d0,%d1
+	andi.w		&0x7,%d1		# pass index in d1
+	mov.l		%a0,%d0			# pass result in d0
+	bsr.l		store_dreg_b		# save result in regfile
+	rts
+
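+# note: in the lo opword, bits 5-3 hold the <ea> mode and bits 2-0 the
+# register number. mode %000 is data register direct, so a nonzero
+# mode field above sends the result byte to memory instead.
+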
+#
+# the stacked <ea> is correct with the exception of:
+#	-> Dn : <ea> is garbage
+#
+# if the addressing mode is post-increment or pre-decrement,
+# then the address registers have not been updated.
+#
+fscc_mem_op:
+	cmpi.b		%d1,&0x18		# is <ea> (An)+ ?
+	beq.b		fscc_mem_inc		# yes
+	cmpi.b		%d1,&0x20		# is <ea> -(An) ?
+	beq.b		fscc_mem_dec		# yes
+
+	mov.l		%a0,%d0			# pass result in d0
+	mov.l		EXC_EA(%a6),%a0		# fetch <ea>
+	bsr.l		_dmem_write_byte	# write result byte
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fscc_err		# yes
+
+	rts
+
+# addressing mode is post-increment. write the result byte. if the write
+# fails then don't update the address register. if the write passes then
+# call inc_areg() to update the address register.
+fscc_mem_inc:
+	mov.l		%a0,%d0			# pass result in d0
+	mov.l		EXC_EA(%a6),%a0		# fetch <ea>
+	bsr.l		_dmem_write_byte	# write result byte
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fscc_err		# yes
+
+	mov.b		0x1+EXC_OPWORD(%a6),%d1	# fetch opword
+	andi.w		&0x7,%d1		# pass index in d1
+	movq.l		&0x1,%d0		# pass amt to inc by
+	bsr.l		inc_areg		# increment address register
+
+	rts
+
+# addressing mode is pre-decrement. write the result byte. if the write
+# fails then don't update the address register. if the write passes then
+# call dec_areg() to update the address register.
+fscc_mem_dec:
+	mov.l		%a0,%d0			# pass result in d0
+	mov.l		EXC_EA(%a6),%a0		# fetch <ea>
+	bsr.l		_dmem_write_byte	# write result byte
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fscc_err		# yes
+
+	mov.b		0x1+EXC_OPWORD(%a6),%d1	# fetch opword
+	andi.w		&0x7,%d1		# pass index in d1
+	movq.l		&0x1,%d0		# pass amt to dec by
+	bsr.l		dec_areg		# decrement address register
+
+	rts
+
+# the emulation routine set bsun and BSUN was enabled. have to
+# fix stack and jump to the bsun handler.
+# let the caller of this routine shift the stack frame up to
+# eliminate the effective address field.
+fscc_bsun:
+	mov.b		&fbsun_flg,SPCOND_FLG(%a6)
+	rts
+
+# the byte write to memory has failed. pass the failing effective address
+# and a FSLW to funimp_dacc().
+fscc_err:
+	mov.w		&0x00a1,EXC_VOFF(%a6)
+	bra.l		facc_finish
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fmovm_dynamic(): emulate "fmovm" dynamic instruction		#
+#									#
+# XREF ****************************************************************	#
+#	fetch_dreg() - fetch data register				#
+#	{i,d,}mem_read() - fetch data from memory			#
+#	_mem_write() - write data to memory				#
+#	iea_iacc() - instruction memory access error occurred		#
+#	iea_dacc() - data memory access error occurred			#
+#	restore() - restore An index regs if access error occurred	#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If instr is "fmovm Dn,-(A7)" from supervisor mode,		#
+#		d0 = size of dump					#
+#		d1 = Dn							#
+#	Else if instruction access error,				#
+#		d0 = FSLW						#
+#	Else if data access error,					#
+#		d0 = FSLW						#
+#		a0 = address of fault					#
+#	Else								#
+#		none.							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The effective address must be calculated since this is entered	#
+# from an "Unimplemented Effective Address" exception handler. So, we	#
+# have our own fcalc_ea() routine here. If an access error is flagged	#
+# by a _{i,d,}mem_read() call, we must exit through the special		#
+# handler.								#
+#	The data register is determined and its value loaded to get the	#
+# string of FP registers affected. This value is used as an index into	#
+# a lookup table such that we can determine the number of bytes		#
+# involved.								#
+#	If the instruction is "fmovm.x <ea>,Dn", a _mem_read() is used	#
+# to read in all FP values. Again, _mem_read() may fail and require a	#
+# special exit.								#
+#	If the instruction is "fmovm.x DN,<ea>", a _mem_write() is used	#
+# to write all FP values. _mem_write() may also fail.			#
+#	If the instruction is "fmovm.x DN,-(a7)" from supervisor mode,	#
+# then we return the size of the dump and the string to the caller	#
+# so that the move can occur outside of this routine. This special	#
+# case is required so that moves to the system stack are handled	#
+# correctly.								#
+#									#
+# DYNAMIC:								#
+#	fmovm.x	dn, <ea>						#
+#	fmovm.x	<ea>, dn						#
+#									#
+#	      <WORD 1>		      <WORD2>				#
+#	1111 0010 00 |<ea>|	11@& 1000 0$$$ 0000			#
+#									#
+#	& = (0): predecrement addressing mode				#
+#	    (1): postincrement or control addressing mode		#
+#	@ = (0): move listed regs from memory to the FPU		#
+#	    (1): move listed regs from the FPU to memory		#
+#	$$$    : index of data register holding reg select mask		#
+#									#
+# NOTES:								#
+#	If the data register holds a zero, then the			#
+#	instruction is a nop.						#
+#									#
+#########################################################################
+
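+# encoding example (derived from the <WORD2> format above): @=1, &=0,
+# $$$=011 gives a second word of %1110 1000 0011 0000 (0xe830), i.e.
+# move the regs selected by the mask in d3 from the FPU to memory
+# using the predecrement addressing mode.
+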
+	global		fmovm_dynamic
+fmovm_dynamic:
+
+# extract the data register in which the bit string resides...
+	mov.b		1+EXC_EXTWORD(%a6),%d1	# fetch extword
+	andi.w		&0x70,%d1		# extract reg bits
+	lsr.b		&0x4,%d1		# shift into lo bits
+
+# fetch the bit string into d0...
+	bsr.l		fetch_dreg		# fetch reg string
+
+	andi.l		&0x000000ff,%d0		# keep only lo byte
+
+	mov.l		%d0,-(%sp)		# save strg
+	mov.b		(tbl_fmovm_size.w,%pc,%d0),%d0
+	mov.l		%d0,-(%sp)		# save size
+	bsr.l		fmovm_calc_ea		# calculate <ea>
+	mov.l		(%sp)+,%d0		# restore size
+	mov.l		(%sp)+,%d1		# restore strg
+
+# if the bit string is a zero, then the operation is a no-op
+# but, make sure that we've calculated ea and advanced the opword pointer
+	beq.w		fmovm_data_done
+
+# separate move ins from move outs...
+	btst		&0x5,EXC_EXTWORD(%a6)	# is it a move in or out?
+	beq.w		fmovm_data_in		# it's a move out
+
+#############
+# MOVE OUT: #
+#############
+fmovm_data_out:
+	btst		&0x4,EXC_EXTWORD(%a6)	# control or predecrement?
+	bne.w		fmovm_out_ctrl		# control
+
+############################
+fmovm_out_predec:
+# for predecrement mode, the bit string is the opposite of both control
+# operations and postincrement mode. (bit7 = FP7 ... bit0 = FP0)
+# here, we convert it to be just like the others...
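+# (e.g. a predecrement string of 0x01, which selects only FP0, becomes
+# 0x80, the control/postincrement encoding of FP0; see
+# tbl_fmovm_convert below.)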
+	mov.b		(tbl_fmovm_convert.w,%pc,%d1.w*1),%d1
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode?
+	beq.b		fmovm_out_ctrl		# user
+
+fmovm_out_predec_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
+	bne.b		fmovm_out_ctrl
+
+# the operation was unfortunately an: fmovm.x dn,-(sp)
+# called from supervisor mode.
+# we're also passing "size" and "strg" back to the calling routine
+	rts
+
+############################
+fmovm_out_ctrl:
+	mov.l		%a0,%a1			# move <ea> to a1
+
+	sub.l		%d0,%sp			# subtract size of dump
+	lea		(%sp),%a0
+
+	tst.b		%d1			# should FP0 be moved?
+	bpl.b		fmovm_out_ctrl_fp1	# no
+
+	mov.l		0x0+EXC_FP0(%a6),(%a0)+	# yes
+	mov.l		0x4+EXC_FP0(%a6),(%a0)+
+	mov.l		0x8+EXC_FP0(%a6),(%a0)+
+
+fmovm_out_ctrl_fp1:
+	lsl.b		&0x1,%d1		# should FP1 be moved?
+	bpl.b		fmovm_out_ctrl_fp2	# no
+
+	mov.l		0x0+EXC_FP1(%a6),(%a0)+	# yes
+	mov.l		0x4+EXC_FP1(%a6),(%a0)+
+	mov.l		0x8+EXC_FP1(%a6),(%a0)+
+
+fmovm_out_ctrl_fp2:
+	lsl.b		&0x1,%d1		# should FP2 be moved?
+	bpl.b		fmovm_out_ctrl_fp3	# no
+
+	fmovm.x		&0x20,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp3:
+	lsl.b		&0x1,%d1		# should FP3 be moved?
+	bpl.b		fmovm_out_ctrl_fp4	# no
+
+	fmovm.x		&0x10,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp4:
+	lsl.b		&0x1,%d1		# should FP4 be moved?
+	bpl.b		fmovm_out_ctrl_fp5	# no
+
+	fmovm.x		&0x08,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp5:
+	lsl.b		&0x1,%d1		# should FP5 be moved?
+	bpl.b		fmovm_out_ctrl_fp6	# no
+
+	fmovm.x		&0x04,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp6:
+	lsl.b		&0x1,%d1		# should FP6 be moved?
+	bpl.b		fmovm_out_ctrl_fp7	# no
+
+	fmovm.x		&0x02,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp7:
+	lsl.b		&0x1,%d1		# should FP7 be moved?
+	bpl.b		fmovm_out_ctrl_done	# no
+
+	fmovm.x		&0x01,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_done:
+	mov.l		%a1,L_SCR1(%a6)
+
+	lea		(%sp),%a0		# pass: supervisor src
+	mov.l		%d0,-(%sp)		# save size
+	bsr.l		_dmem_write		# copy data to user mem
+
+	mov.l		(%sp)+,%d0
+	add.l		%d0,%sp			# clear fpreg data from stack
+
+	tst.l		%d1			# did dstore err?
+	bne.w		fmovm_out_err		# yes
+
+	rts
+
+############
+# MOVE IN: #
+############
+fmovm_data_in:
+	mov.l		%a0,L_SCR1(%a6)
+
+	sub.l		%d0,%sp			# make room for fpregs
+	lea		(%sp),%a1
+
+	mov.l		%d1,-(%sp)		# save bit string for later
+	mov.l		%d0,-(%sp)		# save # of bytes
+
+	bsr.l		_dmem_read		# copy data from user mem
+
+	mov.l		(%sp)+,%d0		# retrieve # of bytes
+
+	tst.l		%d1			# did dfetch fail?
+	bne.w		fmovm_in_err		# yes
+
+	mov.l		(%sp)+,%d1		# load bit string
+
+	lea		(%sp),%a0		# addr of stack
+
+	tst.b		%d1			# should FP0 be moved?
+	bpl.b		fmovm_data_in_fp1	# no
+
+	mov.l		(%a0)+,0x0+EXC_FP0(%a6)	# yes
+	mov.l		(%a0)+,0x4+EXC_FP0(%a6)
+	mov.l		(%a0)+,0x8+EXC_FP0(%a6)
+
+fmovm_data_in_fp1:
+	lsl.b		&0x1,%d1		# should FP1 be moved?
+	bpl.b		fmovm_data_in_fp2	# no
+
+	mov.l		(%a0)+,0x0+EXC_FP1(%a6)	# yes
+	mov.l		(%a0)+,0x4+EXC_FP1(%a6)
+	mov.l		(%a0)+,0x8+EXC_FP1(%a6)
+
+fmovm_data_in_fp2:
+	lsl.b		&0x1,%d1		# should FP2 be moved?
+	bpl.b		fmovm_data_in_fp3	# no
+
+	fmovm.x		(%a0)+,&0x20		# yes
+
+fmovm_data_in_fp3:
+	lsl.b		&0x1,%d1		# should FP3 be moved?
+	bpl.b		fmovm_data_in_fp4	# no
+
+	fmovm.x		(%a0)+,&0x10		# yes
+
+fmovm_data_in_fp4:
+	lsl.b		&0x1,%d1		# should FP4 be moved?
+	bpl.b		fmovm_data_in_fp5	# no
+
+	fmovm.x		(%a0)+,&0x08		# yes
+
+fmovm_data_in_fp5:
+	lsl.b		&0x1,%d1		# should FP5 be moved?
+	bpl.b		fmovm_data_in_fp6	# no
+
+	fmovm.x		(%a0)+,&0x04		# yes
+
+fmovm_data_in_fp6:
+	lsl.b		&0x1,%d1		# should FP6 be moved?
+	bpl.b		fmovm_data_in_fp7	# no
+
+	fmovm.x		(%a0)+,&0x02		# yes
+
+fmovm_data_in_fp7:
+	lsl.b		&0x1,%d1		# should FP7 be moved?
+	bpl.b		fmovm_data_in_done	# no
+
+	fmovm.x		(%a0)+,&0x01		# yes
+
+fmovm_data_in_done:
+	add.l		%d0,%sp			# remove fpregs from stack
+	rts
+
+#####################################
+
+fmovm_data_done:
+	rts
+
+##############################################################################
+
+#
+# table indexed by the operation's bit string that gives the number
+# of bytes that will be moved.
+#
+# number of bytes = (# of 1's in bit string) * 12(bytes/fpreg)
+#
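+# e.g. a bit string of 0x83 has three bits set (bits 7, 1, and 0), so
+# the table entry at index 0x83 is 3 * 12 = 0x24 bytes.
+#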
+tbl_fmovm_size:
+	byte	0x00,0x0c,0x0c,0x18,0x0c,0x18,0x18,0x24
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x3c,0x48,0x48,0x54,0x48,0x54,0x54,0x60
+
+#
+# table to convert a pre-decrement bit string into a post-increment
+# or control bit string.
+# ex:	0x00	==>	0x00
+#	0x01	==>	0x80
+#	0x02	==>	0x40
+#		.
+#		.
+#	0xfd	==>	0xbf
+#	0xfe	==>	0x7f
+#	0xff	==>	0xff
+#
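+# i.e. each entry is its index with the bit order reversed; for
+# example, index 0x1c (%00011100) holds 0x38 (%00111000).
+#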
+tbl_fmovm_convert:
+	byte	0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0
+	byte	0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0
+	byte	0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8
+	byte	0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8
+	byte	0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4
+	byte	0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4
+	byte	0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec
+	byte	0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc
+	byte	0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2
+	byte	0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2
+	byte	0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea
+	byte	0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa
+	byte	0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6
+	byte	0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6
+	byte	0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee
+	byte	0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe
+	byte	0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1
+	byte	0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1
+	byte	0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9
+	byte	0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9
+	byte	0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5
+	byte	0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5
+	byte	0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed
+	byte	0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd
+	byte	0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3
+	byte	0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3
+	byte	0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb
+	byte	0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb
+	byte	0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7
+	byte	0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7
+	byte	0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef
+	byte	0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
+
+	global		fmovm_calc_ea
+###############################################
+# _fmovm_calc_ea: calculate effective address #
+###############################################
+fmovm_calc_ea:
+	mov.l		%d0,%a0			# move # bytes to a0
+
+# currently, MODE and REG are taken from the EXC_OPWORD. this could be
+# easily changed if they were inputs passed in registers.
+	mov.w		EXC_OPWORD(%a6),%d0	# fetch opcode word
+	mov.w		%d0,%d1			# make a copy
+
+	andi.w		&0x3f,%d0		# extract mode field
+	andi.l		&0x7,%d1		# extract reg  field
+
+# jump to the corresponding function for each {MODE,REG} pair.
+	mov.w		(tbl_fea_mode.b,%pc,%d0.w*2),%d0 # fetch jmp distance
+	jmp		(tbl_fea_mode.b,%pc,%d0.w*1) # jmp to correct ea mode
+
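+# dispatch note: the six {MODE,REG} bits form an index 0-63 into the
+# 64-entry table below. e.g. mode %010 (address register indirect)
+# with reg %011 gives index 19, whose entry is
+# "faddr_ind_a3 - tbl_fea_mode".
+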
+	swbeg		&64
+tbl_fea_mode:
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+
+	short		faddr_ind_a0	-	tbl_fea_mode
+	short		faddr_ind_a1	-	tbl_fea_mode
+	short		faddr_ind_a2	-	tbl_fea_mode
+	short		faddr_ind_a3	-	tbl_fea_mode
+	short		faddr_ind_a4	-	tbl_fea_mode
+	short		faddr_ind_a5	-	tbl_fea_mode
+	short		faddr_ind_a6	-	tbl_fea_mode
+	short		faddr_ind_a7	-	tbl_fea_mode
+
+	short		faddr_ind_p_a0	-	tbl_fea_mode
+	short		faddr_ind_p_a1	-	tbl_fea_mode
+	short		faddr_ind_p_a2	-	tbl_fea_mode
+	short		faddr_ind_p_a3	-	tbl_fea_mode
+	short		faddr_ind_p_a4	-	tbl_fea_mode
+	short		faddr_ind_p_a5	-	tbl_fea_mode
+	short		faddr_ind_p_a6	-	tbl_fea_mode
+	short		faddr_ind_p_a7	-	tbl_fea_mode
+
+	short		faddr_ind_m_a0	-	tbl_fea_mode
+	short		faddr_ind_m_a1	-	tbl_fea_mode
+	short		faddr_ind_m_a2	-	tbl_fea_mode
+	short		faddr_ind_m_a3	-	tbl_fea_mode
+	short		faddr_ind_m_a4	-	tbl_fea_mode
+	short		faddr_ind_m_a5	-	tbl_fea_mode
+	short		faddr_ind_m_a6	-	tbl_fea_mode
+	short		faddr_ind_m_a7	-	tbl_fea_mode
+
+	short		faddr_ind_disp_a0	-	tbl_fea_mode
+	short		faddr_ind_disp_a1	-	tbl_fea_mode
+	short		faddr_ind_disp_a2	-	tbl_fea_mode
+	short		faddr_ind_disp_a3	-	tbl_fea_mode
+	short		faddr_ind_disp_a4	-	tbl_fea_mode
+	short		faddr_ind_disp_a5	-	tbl_fea_mode
+	short		faddr_ind_disp_a6	-	tbl_fea_mode
+	short		faddr_ind_disp_a7	-	tbl_fea_mode
+
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+
+	short		fabs_short	-	tbl_fea_mode
+	short		fabs_long	-	tbl_fea_mode
+	short		fpc_ind		-	tbl_fea_mode
+	short		fpc_ind_ext	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+
+###################################
+# Address register indirect: (An) #
+###################################
+faddr_ind_a0:
+	mov.l		EXC_DREGS+0x8(%a6),%a0	# Get current a0
+	rts
+
+faddr_ind_a1:
+	mov.l		EXC_DREGS+0xc(%a6),%a0	# Get current a1
+	rts
+
+faddr_ind_a2:
+	mov.l		%a2,%a0			# Get current a2
+	rts
+
+faddr_ind_a3:
+	mov.l		%a3,%a0			# Get current a3
+	rts
+
+faddr_ind_a4:
+	mov.l		%a4,%a0			# Get current a4
+	rts
+
+faddr_ind_a5:
+	mov.l		%a5,%a0			# Get current a5
+	rts
+
+faddr_ind_a6:
+	mov.l		(%a6),%a0		# Get current a6
+	rts
+
+faddr_ind_a7:
+	mov.l		EXC_A7(%a6),%a0		# Get current a7
+	rts
+
+#####################################################
+# Address register indirect w/ postincrement: (An)+ #
+#####################################################
+faddr_ind_p_a0:
+	mov.l		EXC_DREGS+0x8(%a6),%d0	# Get current a0
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,EXC_DREGS+0x8(%a6)	# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a1:
+	mov.l		EXC_DREGS+0xc(%a6),%d0	# Get current a1
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,EXC_DREGS+0xc(%a6)	# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a2:
+	mov.l		%a2,%d0			# Get current a2
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a2			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a3:
+	mov.l		%a3,%d0			# Get current a3
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a3			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a4:
+	mov.l		%a4,%d0			# Get current a4
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a4			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a5:
+	mov.l		%a5,%d0			# Get current a5
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a5			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a6:
+	mov.l		(%a6),%d0		# Get current a6
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,(%a6)		# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a7:
+	mov.b		&mia7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+	mov.l		EXC_A7(%a6),%d0		# Get current a7
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,EXC_A7(%a6)		# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+####################################################
+# Address register indirect w/ predecrement: -(An) #
+####################################################
+faddr_ind_m_a0:
+	mov.l		EXC_DREGS+0x8(%a6),%d0	# Get current a0
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_DREGS+0x8(%a6)	# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a1:
+	mov.l		EXC_DREGS+0xc(%a6),%d0	# Get current a1
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_DREGS+0xc(%a6)	# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a2:
+	mov.l		%a2,%d0			# Get current a2
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a2			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a3:
+	mov.l		%a3,%d0			# Get current a3
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a3			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a4:
+	mov.l		%a4,%d0			# Get current a4
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a4			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a5:
+	mov.l		%a5,%d0			# Get current a5
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a5			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a6:
+	mov.l		(%a6),%d0		# Get current a6
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,(%a6)		# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a7:
+	mov.b		&mda7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+	mov.l		EXC_A7(%a6),%d0		# Get current a7
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A7(%a6)		# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+########################################################
+# Address register indirect w/ displacement: (d16, An) #
+########################################################
+faddr_ind_disp_a0:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_DREGS+0x8(%a6),%a0	# a0 + d16
+	rts
+
+faddr_ind_disp_a1:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_DREGS+0xc(%a6),%a0	# a1 + d16
+	rts
+
+faddr_ind_disp_a2:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a2,%a0			# a2 + d16
+	rts
+
+faddr_ind_disp_a3:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a3,%a0			# a3 + d16
+	rts
+
+faddr_ind_disp_a4:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a4,%a0			# a4 + d16
+	rts
+
+faddr_ind_disp_a5:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a5,%a0			# a5 + d16
+	rts
+
+faddr_ind_disp_a6:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		(%a6),%a0		# a6 + d16
+	rts
+
+faddr_ind_disp_a7:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_A7(%a6),%a0		# a7 + d16
+	rts
+
+########################################################################
+# Address register indirect w/ index(8-bit displacement): (d8, An, Xn) #
+#    "       "         "    w/   "  (base displacement): (bd, An, Xn)  #
+# Memory indirect postindexed: ([bd, An], Xn, od)		       #
+# Memory indirect preindexed: ([bd, An, Xn], od)		       #
+########################################################################
+faddr_ind_ext:
+	addq.l		&0x8,%d1
+	bsr.l		fetch_dreg		# fetch base areg
+	mov.l		%d0,-(%sp)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch extword in d0
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		(%sp)+,%a0
+
+	btst		&0x8,%d0
+	bne.w		fcalc_mem_ind
+
+	mov.l		%d0,L_SCR1(%a6)		# hold opword
+
+	mov.l		%d0,%d1
+	rol.w		&0x4,%d1
+	andi.w		&0xf,%d1		# extract index regno
+
+# count on fetch_dreg() not to alter a0...
+	bsr.l		fetch_dreg		# fetch index
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		L_SCR1(%a6),%d2		# fetch opword
+
+	btst		&0xb,%d2		# is it word or long?
+	bne.b		faii8_long
+	ext.l		%d0			# sign extend word index
+faii8_long:
+	mov.l		%d2,%d1
+	rol.w		&0x7,%d1
+	andi.l		&0x3,%d1		# extract scale value
+
+	lsl.l		%d1,%d0			# shift index by scale
+
+	extb.l		%d2			# sign extend displacement
+	add.l		%d2,%d0			# index + disp
+	add.l		%d0,%a0			# An + (index + disp)
+
+	mov.l		(%sp)+,%d2		# restore old d2
+	rts
+
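+# i.e. the brief-format <ea> computed above is:
+#	<ea> = An + (index << scale) + sign_ext(d8)
+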
+###########################
+# Absolute short: (XXX).W #
+###########################
+fabs_short:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch short address
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# return <ea> in a0
+	rts
+
+##########################
+# Absolute long: (XXX).L #
+##########################
+fabs_long:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch long address
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,%a0			# return <ea> in a0
+	rts
+
+#######################################################
+# Program counter indirect w/ displacement: (d16, PC) #
+#######################################################
+fpc_ind:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch word displacement
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_EXTWPTR(%a6),%a0	# pc + d16
+
+# _imem_read_word() increased the extwptr by 2. need to adjust here.
+	subq.l		&0x2,%a0		# adjust <ea>
+	rts
+
+##########################################################
+# PC indirect w/ index(8-bit displacement): (d8, PC, An) #
+# "     "     w/   "  (base displacement): (bd, PC, An)  #
+# PC memory indirect postindexed: ([bd, PC], Xn, od)     #
+# PC memory indirect preindexed: ([bd, PC, Xn], od)      #
+##########################################################
+fpc_ind_ext:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch ext word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# put base in a0
+	subq.l		&0x2,%a0		# adjust base
+
+	btst		&0x8,%d0		# is disp only 8 bits?
+	bne.w		fcalc_mem_ind		# calc memory indirect
+
+	mov.l		%d0,L_SCR1(%a6)		# store opword
+
+	mov.l		%d0,%d1			# make extword copy
+	rol.w		&0x4,%d1		# rotate reg num into place
+	andi.w		&0xf,%d1		# extract register number
+
+# count on fetch_dreg() not to alter a0...
+	bsr.l		fetch_dreg		# fetch index
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		L_SCR1(%a6),%d2		# fetch opword
+
+	btst		&0xb,%d2		# is index word or long?
+	bne.b		fpii8_long		# long
+	ext.l		%d0			# sign extend word index
+fpii8_long:
+	mov.l		%d2,%d1
+	rol.w		&0x7,%d1		# rotate scale value into place
+	andi.l		&0x3,%d1		# extract scale value
+
+	lsl.l		%d1,%d0			# shift index by scale
+
+	extb.l		%d2			# sign extend displacement
+	add.l		%d2,%d0			# disp + index
+	add.l		%d0,%a0			# An + (index + disp)
+
+	mov.l		(%sp)+,%d2		# restore temp register
+	rts
+
+# d2 = index
+# d3 = base
+# d4 = od
+# d5 = extword
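+#
+# the two memory indirect forms computed below are (M[x] denotes a
+# longword read from address x):
+#	postindexed:	<ea> = M[base + bd] + index + od
+#	preindexed:	<ea> = M[base + bd + index] + od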
+fcalc_mem_ind:
+	btst		&0x6,%d0		# is the index suppressed?
+	beq.b		fcalc_index
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+
+	mov.l		%d0,%d5			# put extword in d5
+	mov.l		%a0,%d3			# put base in d3
+
+	clr.l		%d2			# yes, so index = 0
+	bra.b		fbase_supp_ck
+
+# index:
+fcalc_index:
+	mov.l		%d0,L_SCR1(%a6)		# save d0 (opword)
+	bfextu		%d0{&16:&4},%d1		# fetch dreg index
+	bsr.l		fetch_dreg
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+	mov.l		%d0,%d2			# put index in d2
+	mov.l		L_SCR1(%a6),%d5
+	mov.l		%a0,%d3
+
+	btst		&0xb,%d5		# is index word or long?
+	bne.b		fno_ext
+	ext.l		%d2
+
+fno_ext:
+	bfextu		%d5{&21:&2},%d0
+	lsl.l		%d0,%d2
+
+# base address (passed as parameter in d3):
+# we clear the value here if it should actually be suppressed.
+fbase_supp_ck:
+	btst		&0x7,%d5		# is the bd suppressed?
+	beq.b		fno_base_sup
+	clr.l		%d3
+
+# base displacement:
+fno_base_sup:
+	bfextu		%d5{&26:&2},%d0		# get bd size
+#	beq.l		fmovm_error		# if (size == 0) it's reserved
+
+	cmpi.b		%d0,&0x2
+	blt.b		fno_bd
+	beq.b		fget_word_bd
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	bra.b		fchk_ind
+
+fget_word_bd:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	ext.l		%d0			# sign extend bd
+
+fchk_ind:
+	add.l		%d0,%d3			# base += bd
+
+# outer displacement:
+fno_bd:
+	bfextu		%d5{&30:&2},%d0		# is od suppressed?
+	beq.w		faii_bd
+
+	cmpi.b		%d0,&0x2
+	blt.b		fnull_od
+	beq.b		fword_od
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	bra.b		fadd_them
+
+fword_od:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	ext.l		%d0			# sign extend od
+	bra.b		fadd_them
+
+fnull_od:
+	clr.l		%d0
+
+fadd_them:
+	mov.l		%d0,%d4
+
+	btst		&0x2,%d5		# pre or post indexing?
+	beq.b		fpre_indexed
+
+	mov.l		%d3,%a0
+	bsr.l		_dmem_read_long
+
+	tst.l		%d1			# did dfetch fail?
+	bne.w		fcea_err		# yes
+
+	add.l		%d2,%d0			# <ea> += index
+	add.l		%d4,%d0			# <ea> += od
+	bra.b		fdone_ea
+
+fpre_indexed:
+	add.l		%d2,%d3			# preindexing
+	mov.l		%d3,%a0
+	bsr.l		_dmem_read_long
+
+	tst.l		%d1			# did dfetch fail?
+	bne.w		fcea_err		# yes
+
+	add.l		%d4,%d0			# ea += od
+	bra.b		fdone_ea
+
+faii_bd:
+	add.l		%d2,%d3			# ea = (base + bd) + index
+	mov.l		%d3,%d0
+fdone_ea:
+	mov.l		%d0,%a0
+
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	rts
+
+#########################################################
+fcea_err:
+	mov.l		%d3,%a0
+
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	mov.w		&0x0101,%d0
+	bra.l		iea_dacc
+
+fcea_iacc:
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	bra.l		iea_iacc
+
+fmovm_out_err:
+	bsr.l		restore
+	mov.w		&0x00e1,%d0
+	bra.b		fmovm_err
+
+fmovm_in_err:
+	bsr.l		restore
+	mov.w		&0x0161,%d0
+
+fmovm_err:
+	mov.l		L_SCR1(%a6),%a0
+	bra.l		iea_dacc
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fmovm_ctrl(): emulate fmovm.l of control registers instr	#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read longword from memory			#
+#	iea_iacc() - _imem_read_long() failed; error recovery		#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If _imem_read_long() doesn't fail:				#
+#		USER_FPCR(a6)  = new FPCR value				#
+#		USER_FPSR(a6)  = new FPSR value				#
+#		USER_FPIAR(a6) = new FPIAR value			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Decode the instruction type by looking at the extension word	#
+# in order to see how many control registers to fetch from memory.	#
+# Fetch them using _imem_read_long(). If this fetch fails, exit through	#
+# the special access error exit handler iea_iacc().			#
+#									#
+# Instruction word decoding:						#
+#									#
+#	fmovem.l #<data>, {FPIAR&|FPCR&|FPSR}				#
+#									#
+#		WORD1			WORD2				#
+#	1111 0010 00 111100	100$ $$00 0000 0000			#
+#									#
+#	$$$ (100): FPCR							#
+#	    (010): FPSR							#
+#	    (001): FPIAR						#
+#	    (000): FPIAR						#
+#									#
+#########################################################################
+
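+# worked example (sketch): for "fmovem.l #<data>,fpcr/fpsr/fpiar",
+# WORD2 = 0x9c00, so the reg select byte fetched below equals 0x9c and
+# fctrl_in_7 reads three longwords of immediate data.
+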
+	global		fmovm_ctrl
+fmovm_ctrl:
+	mov.b		EXC_EXTWORD(%a6),%d0	# fetch reg select bits
+	cmpi.b		%d0,&0x9c		# fpcr & fpsr & fpiar ?
+	beq.w		fctrl_in_7		# yes
+	cmpi.b		%d0,&0x98		# fpcr & fpsr ?
+	beq.w		fctrl_in_6		# yes
+	cmpi.b		%d0,&0x94		# fpcr & fpiar ?
+	beq.b		fctrl_in_5		# yes
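+
+# (fall-through: the remaining two-register encoding is 0x8c, i.e.
+# fpsr/fpiar, handled by fctrl_in_3 below.)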
+
+# fmovem.l #<data>, fpsr/fpiar
+fctrl_in_3:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPSR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to stack
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPIAR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to stack
+	rts
+
+# fmovem.l #<data>, fpcr/fpiar
+fctrl_in_5:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPCR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to stack
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPIAR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to stack
+	rts
+
+# fmovem.l #<data>, fpcr/fpsr
+fctrl_in_6:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPCR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to stack
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPSR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to stack
+	rts
+
+# fmovem.l #<data>, fpcr/fpsr/fpiar
+fctrl_in_7:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPCR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to stack
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPSR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to stack
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPIAR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to stack
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_dcalc_ea(): calc correct <ea> from <ea> stacked on exception	#
+#									#
+# XREF ****************************************************************	#
+#	inc_areg() - increment an address register			#
+#	dec_areg() - decrement an address register			#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = number of bytes to adjust <ea> by				#
+#									#
+# OUTPUT **************************************************************	#
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+# "Dummy" CALCulate Effective Address:					#
+#	The stacked <ea> for FP unimplemented instructions and opclass	#
+#	two packed instructions is correct with the exception of...	#
+#									#
+#	1) -(An)   : The register is not updated regardless of size.	#
+#		     Also, for extended precision and packed, the	#
+#		     stacked <ea> value is 8 bytes too big		#
+#	2) (An)+   : The register is not updated.			#
+#	3) #<data> : The upper longword of the immediate operand is	#
+#		     stacked. b,w,l, and s sizes are completely stacked;	#
+#		     d,x, and p are not.				#
+#									#
+#########################################################################
+
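+# example (sketch): for an unimplemented "fsin.x -(%a4)", d0 = 12 on
+# entry; dcea_pd below decrements a4 by 12 itself (the 060 left it
+# untouched) and, since the opsize is extended, also pulls the stacked
+# <ea>, which was 8 bytes too big, back by 8.
+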
+	global		_dcalc_ea
+_dcalc_ea:
+	mov.l		%d0, %a0		# move # bytes to %a0
+
+	mov.b		1+EXC_OPWORD(%a6), %d0	# fetch opcode word
+	mov.l		%d0, %d1		# make a copy
+
+	andi.w		&0x38, %d0		# extract mode field
+	andi.l		&0x7, %d1		# extract reg  field
+
+	cmpi.b		%d0,&0x18		# is mode (An)+ ?
+	beq.b		dcea_pi			# yes
+
+	cmpi.b		%d0,&0x20		# is mode -(An) ?
+	beq.b		dcea_pd			# yes
+
+	or.w		%d1,%d0			# concat mode,reg
+	cmpi.b		%d0,&0x3c		# is mode #<data>?
+	beq.b		dcea_imm		# yes
+
+	mov.l		EXC_EA(%a6),%a0		# return <ea>
+	rts
+
+# need to set immediate data flag here since we'll need to do
+# an imem_read to fetch this later.
+dcea_imm:
+	mov.b		&immed_flg,SPCOND_FLG(%a6)
+	lea		([USER_FPIAR,%a6],0x4),%a0 # return <ea> of immed data
+	rts
+
+# here, the <ea> is stacked correctly. however, we must update the
+# address register...
+dcea_pi:
+	mov.l		%a0,%d0			# pass amt to inc by
+	bsr.l		inc_areg		# inc addr register
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	rts
+
+# the <ea> is stacked correctly for all but extended and packed, for
+# which the stacked <ea>s are 8 bytes too large.
+# it would make no sense to have a pre-decrement to a7 in supervisor
+# mode so we don't even worry about this tricky case here : )
+dcea_pd:
+	mov.l		%a0,%d0			# pass amt to dec by
+	bsr.l		dec_areg		# dec addr register
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+
+	cmpi.b		%d0,&0xc		# is opsize ext or packed?
+	beq.b		dcea_pd2		# yes
+	rts
+dcea_pd2:
+	sub.l		&0x8,%a0		# correct <ea>
+	mov.l		%a0,EXC_EA(%a6)		# put correct <ea> on stack
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_calc_ea_fout(): calculate correct stacked <ea> for extended	#
+#			 and packed data opclass 3 operations.		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = return correct effective address				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	For opclass 3 extended and packed data operations, the <ea>	#
+# stacked for the exception is incorrect for -(an) and (an)+ addressing	#
+# modes. Also, while we're at it, the address register itself must get	#
+# updated.								#
+#	So, for -(an), we must subtract 8 off of the stacked <ea> value	#
+# and return that value as the correct <ea> and store that value in An.	#
+# For (an)+, the stacked <ea> is correct but we must adjust An by +12.	#
+#									#
+#########################################################################
+
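+# example (sketch): for "fmove.x %fp0,-(%a2)" the operand is 12 bytes,
+# so the correct <ea> is the stacked <ea> minus 8 (= old a2 - 12), and
+# ceaf_pd2 writes that value back into a2. for "fmove.x %fp0,(%a2)+"
+# the stacked <ea> is already right and a2 is simply bumped by 12.
+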
+# This calc_ea is currently used to retrieve the correct <ea>
+# for fmove outs of type extended and packed.
+	global		_calc_ea_fout
+_calc_ea_fout:
+	mov.b		1+EXC_OPWORD(%a6),%d0	# fetch opcode word
+	mov.l		%d0,%d1			# make a copy
+
+	andi.w		&0x38,%d0		# extract mode field
+	andi.l		&0x7,%d1		# extract reg  field
+
+	cmpi.b		%d0,&0x18		# is mode (An)+ ?
+	beq.b		ceaf_pi			# yes
+
+	cmpi.b		%d0,&0x20		# is mode -(An) ?
+	beq.w		ceaf_pd			# yes
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	rts
+
+# (An)+ : extended and packed fmove out
+#	: stacked <ea> is correct
+#	: "An" not updated by the hardware, so it is bumped by 12 here
+ceaf_pi:
+	mov.w		(tbl_ceaf_pi.b,%pc,%d1.w*2),%d1
+	mov.l		EXC_EA(%a6),%a0
+	jmp		(tbl_ceaf_pi.b,%pc,%d1.w*1)
+
+	swbeg		&0x8
+tbl_ceaf_pi:
+	short		ceaf_pi0 - tbl_ceaf_pi
+	short		ceaf_pi1 - tbl_ceaf_pi
+	short		ceaf_pi2 - tbl_ceaf_pi
+	short		ceaf_pi3 - tbl_ceaf_pi
+	short		ceaf_pi4 - tbl_ceaf_pi
+	short		ceaf_pi5 - tbl_ceaf_pi
+	short		ceaf_pi6 - tbl_ceaf_pi
+	short		ceaf_pi7 - tbl_ceaf_pi
+
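+# (index 0/1 are a0/a1, which were saved in the exception frame at
+# EXC_DREGS+0x8/0xc and so are updated there; a2-a5 are still live and
+# are bumped directly; a6/a7 again live in the frame.)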
+ceaf_pi0:
+	addi.l		&0xc,EXC_DREGS+0x8(%a6)
+	rts
+ceaf_pi1:
+	addi.l		&0xc,EXC_DREGS+0xc(%a6)
+	rts
+ceaf_pi2:
+	add.l		&0xc,%a2
+	rts
+ceaf_pi3:
+	add.l		&0xc,%a3
+	rts
+ceaf_pi4:
+	add.l		&0xc,%a4
+	rts
+ceaf_pi5:
+	add.l		&0xc,%a5
+	rts
+ceaf_pi6:
+	addi.l		&0xc,EXC_A6(%a6)
+	rts
+ceaf_pi7:
+	mov.b		&mia7_flg,SPCOND_FLG(%a6)
+	addi.l		&0xc,EXC_A7(%a6)
+	rts
+
+# -(An) : extended and packed fmove out
+#	: stacked <ea> = actual <ea> + 8
+#	: "An" not updated by the hardware; the corrected <ea> is written back
+ceaf_pd:
+	mov.w		(tbl_ceaf_pd.b,%pc,%d1.w*2),%d1
+	mov.l		EXC_EA(%a6),%a0
+	sub.l		&0x8,%a0
+	sub.l		&0x8,EXC_EA(%a6)
+	jmp		(tbl_ceaf_pd.b,%pc,%d1.w*1)
+
+	swbeg		&0x8
+tbl_ceaf_pd:
+	short		ceaf_pd0 - tbl_ceaf_pd
+	short		ceaf_pd1 - tbl_ceaf_pd
+	short		ceaf_pd2 - tbl_ceaf_pd
+	short		ceaf_pd3 - tbl_ceaf_pd
+	short		ceaf_pd4 - tbl_ceaf_pd
+	short		ceaf_pd5 - tbl_ceaf_pd
+	short		ceaf_pd6 - tbl_ceaf_pd
+	short		ceaf_pd7 - tbl_ceaf_pd
+
+ceaf_pd0:
+	mov.l		%a0,EXC_DREGS+0x8(%a6)
+	rts
+ceaf_pd1:
+	mov.l		%a0,EXC_DREGS+0xc(%a6)
+	rts
+ceaf_pd2:
+	mov.l		%a0,%a2
+	rts
+ceaf_pd3:
+	mov.l		%a0,%a3
+	rts
+ceaf_pd4:
+	mov.l		%a0,%a4
+	rts
+ceaf_pd5:
+	mov.l		%a0,%a5
+	rts
+ceaf_pd6:
+	mov.l		%a0,EXC_A6(%a6)
+	rts
+ceaf_pd7:
+	mov.l		%a0,EXC_A7(%a6)
+	mov.b		&mda7_flg,SPCOND_FLG(%a6)
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_load_fop(): load operand for unimplemented FP exception	#
+#									#
+# XREF ****************************************************************	#
+#	set_tag_x() - determine ext prec optype tag			#
+#	set_tag_s() - determine sgl prec optype tag			#
+#	set_tag_d() - determine dbl prec optype tag			#
+#	unnorm_fix() - convert normalized number to denorm or zero	#
+#	norm() - normalize a denormalized number			#
+#	get_packed() - fetch a packed operand from memory		#
+#	_dcalc_ea() - calculate <ea>, fixing An in process		#
+#									#
+#	_imem_read_{word,long}() - read from instruction memory		#
+#	_dmem_read() - read from data memory				#
+#	_dmem_read_{byte,word,long}() - read from data memory		#
+#									#
+#	facc_in_{b,w,l,d,x}() - mem read failed; special exit point	#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If memory access doesn't fail:					#
+#		FP_SRC(a6) = source operand in extended precision	#
+#		FP_DST(a6) = destination operand in extended precision	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This is called from the Unimplemented FP exception handler in	#
+# order to load the source and maybe destination operand into		#
+# FP_SRC(a6) and FP_DST(a6). If the instruction was opclass zero, load	#
+# the source and destination from the FP register file. Set the optype	#
+# tags for both if dyadic, one for monadic. If a number is an UNNORM,	#
+# convert it to a DENORM or a ZERO.					#
+#	If the instruction is opclass two (memory->reg), then fetch	#
+# the destination from the register file and the source operand from	#
+# memory. Tag and fix both as above w/ opclass zero instructions.	#
+#	If the source operand is byte,word,long, or single, it may be	#
+# in the data register file. If it's actually out in memory, use one of	#
+# the mem_read() routines to fetch it. If the mem_read() access returns	#
+# a failing value, exit through the special facc_in() routine which	#
+# will create an access error exception frame from the current exception #
+# frame.								#
+#	Immediate data and regular data accesses are separated because	#
+# if an immediate data access fails, the resulting fault status		#
+# longword stacked for the access error exception must have the		#
+# instruction bit set.							#
+#									#
+#########################################################################
+
+	global		_load_fop
+_load_fop:
+
+#  15     13 12 10  9 7  6       0
+# /        \ /   \ /  \ /         \
+# ---------------------------------
+# | opclass | RX  | RY | EXTENSION |  (2nd word of general FP instruction)
+# ---------------------------------
+#
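+# for opclass '010, RX is the source format specifier (the same 3-bit
+# code that indexes tbl_fp_type below): e.g. RX = 101 selects a double
+# precision source in memory.
+#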
+
+#	bfextu		EXC_CMDREG(%a6){&0:&3}, %d0 # extract opclass
+#	cmpi.b		%d0, &0x2		# which class is it? ('000,'010,'011)
+#	beq.w		op010			# handle <ea> -> fpn
+#	bgt.w		op011			# handle fpn -> <ea>
+
+# we're not using op011 for now...
+	btst		&0x6,EXC_CMDREG(%a6)
+	bne.b		op010
+
+############################
+# OPCLASS '000: reg -> reg #
+############################
+op000:
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch extension word lo
+	btst		&0x5,%d0		# testing extension bits
+	beq.b		op000_src		# (bit 5 == 0) => monadic
+	btst		&0x4,%d0		# (bit 5 == 1)
+	beq.b		op000_dst		# (bit 4 == 0) => dyadic
+	and.w		&0x007f,%d0		# extract extension bits {6:0}
+	cmpi.w		%d0,&0x0038		# is it an fcmp (dyadic) ?
+	bne.b		op000_src		# no; not an fcmp, so monadic
+
+op000_dst:
+	bfextu		EXC_CMDREG(%a6){&6:&3}, %d0 # extract dst field
+	bsr.l		load_fpn2		# fetch dst fpreg into FP_DST
+
+	bsr.l		set_tag_x		# get dst optype tag
+
+	cmpi.b		%d0, &UNNORM		# is dst fpreg an UNNORM?
+	beq.b		op000_dst_unnorm	# yes
+op000_dst_cont:
+	mov.b		%d0, DTAG(%a6)		# store the dst optype tag
+
+op000_src:
+	bfextu		EXC_CMDREG(%a6){&3:&3}, %d0 # extract src field
+	bsr.l		load_fpn1		# fetch src fpreg into FP_SRC
+
+	bsr.l		set_tag_x		# get src optype tag
+
+	cmpi.b		%d0, &UNNORM		# is src fpreg an UNNORM?
+	beq.b		op000_src_unnorm	# yes
+op000_src_cont:
+	mov.b		%d0, STAG(%a6)		# store the src optype tag
+	rts
+
+op000_dst_unnorm:
+	bsr.l		unnorm_fix		# fix the dst UNNORM
+	bra.b		op000_dst_cont
+op000_src_unnorm:
+	bsr.l		unnorm_fix		# fix the src UNNORM
+	bra.b		op000_src_cont
+
+#############################
+# OPCLASS '010: <ea> -> reg #
+#############################
+op010:
+	mov.w		EXC_CMDREG(%a6),%d0	# fetch extension word
+	btst		&0x5,%d0		# testing extension bits
+	beq.b		op010_src		# (bit 5 == 0) => monadic
+	btst		&0x4,%d0		# (bit 5 == 1)
+	beq.b		op010_dst		# (bit 4 == 0) => dyadic
+	and.w		&0x007f,%d0		# extract extension bits {6:0}
+	cmpi.w		%d0,&0x0038		# is it an fcmp (dyadic) ?
+	bne.b		op010_src		# no; not an fcmp, so monadic
+
+op010_dst:
+	bfextu		EXC_CMDREG(%a6){&6:&3}, %d0 # extract dst field
+	bsr.l		load_fpn2		# fetch dst fpreg ptr
+
+	bsr.l		set_tag_x		# get dst type tag
+
+	cmpi.b		%d0, &UNNORM		# is dst fpreg an UNNORM?
+	beq.b		op010_dst_unnorm	# yes
+op010_dst_cont:
+	mov.b		%d0, DTAG(%a6)		# store the dst optype tag
+
+op010_src:
+	bfextu		EXC_CMDREG(%a6){&3:&3}, %d0 # extract src type field
+
+	bfextu		EXC_OPWORD(%a6){&10:&3}, %d1 # extract <ea> mode field
+	bne.w		fetch_from_mem		# src op is in memory
+
+op010_dreg:
+	clr.b		STAG(%a6)		# either NORM or ZERO
+	bfextu		EXC_OPWORD(%a6){&13:&3}, %d1 # extract src reg field
+
+	mov.w		(tbl_op010_dreg.b,%pc,%d0.w*2), %d0 # jmp based on optype
+	jmp		(tbl_op010_dreg.b,%pc,%d0.w*1) # fetch src from dreg
+
+op010_dst_unnorm:
+	bsr.l		unnorm_fix		# fix the dst UNNORM
+	bra.b		op010_dst_cont
+
+	swbeg		&0x8
+tbl_op010_dreg:
+	short		opd_long	- tbl_op010_dreg
+	short		opd_sgl		- tbl_op010_dreg
+	short		tbl_op010_dreg	- tbl_op010_dreg
+	short		tbl_op010_dreg	- tbl_op010_dreg
+	short		opd_word	- tbl_op010_dreg
+	short		tbl_op010_dreg	- tbl_op010_dreg
+	short		opd_byte	- tbl_op010_dreg
+	short		tbl_op010_dreg	- tbl_op010_dreg
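+
+# (the entries that point back at tbl_op010_dreg are the x, p, and d
+# formats plus the unused specifier 7: none of these can live in a
+# data register.)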
+
+#
+# LONG: can be either NORM or ZERO...
+#
+opd_long:
+	bsr.l		fetch_dreg		# fetch long in d0
+	fmov.l		%d0, %fp0		# load a long
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+	fbeq.w		opd_long_zero		# long is a ZERO
+	rts
+opd_long_zero:
+	mov.b		&ZERO, STAG(%a6)	# set ZERO optype flag
+	rts
+
+#
+# WORD: can be either NORM or ZERO...
+#
+opd_word:
+	bsr.l		fetch_dreg		# fetch word in d0
+	fmov.w		%d0, %fp0		# load a word
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+	fbeq.w		opd_word_zero		# WORD is a ZERO
+	rts
+opd_word_zero:
+	mov.b		&ZERO, STAG(%a6)	# set ZERO optype flag
+	rts
+
+#
+# BYTE: can be either NORM or ZERO...
+#
+opd_byte:
+	bsr.l		fetch_dreg		# fetch byte in d0
+	fmov.b		%d0, %fp0		# load a byte
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+	fbeq.w		opd_byte_zero		# byte is a ZERO
+	rts
+opd_byte_zero:
+	mov.b		&ZERO, STAG(%a6)	# set ZERO optype flag
+	rts
+
+#
+# SGL: can be either NORM, DENORM, ZERO, INF, QNAN or SNAN but not UNNORM
+#
+# separate SNANs and DENORMs so they can be loaded w/ special care.
+# all others can simply be moved "in" using fmove.
+#
+opd_sgl:
+	bsr.l		fetch_dreg		# fetch sgl in d0
+	mov.l		%d0,L_SCR1(%a6)
+
+	lea		L_SCR1(%a6), %a0	# pass: ptr to the sgl
+	bsr.l		set_tag_s		# determine sgl type
+	mov.b		%d0, STAG(%a6)		# save the src tag
+
+	cmpi.b		%d0, &SNAN		# is it an SNAN?
+	beq.w		get_sgl_snan		# yes
+
+	cmpi.b		%d0, &DENORM		# is it a DENORM?
+	beq.w		get_sgl_denorm		# yes
+
+	fmov.s		(%a0), %fp0		# no, so can load it regular
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+	rts
+
+##############################################################################
+
+#########################################################################
+# fetch_from_mem():							#
+# - src is out in memory. must:						#
+#	(1) calc ea - must read AFTER you know the src type since	#
+#		      if the ea is -() or ()+, need to know # of bytes.	#
+#	(2) read it in from either user or supervisor space		#
+#	(3) if (b || w || l) then simply read in			#
+#	    if (s || d || x) then check for SNAN,UNNORM,DENORM		#
+#	    if (packed) then punt for now				#
+# INPUT:								#
+#	%d0 : src type field						#
+#########################################################################
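+# (dispatch sketch: %d0 here is the 3-bit source specifier from the
+# command word, so e.g. %d0 = 4 lands in load_word via tbl_fp_type.)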
+fetch_from_mem:
+	clr.b		STAG(%a6)		# either NORM or ZERO
+
+	mov.w		(tbl_fp_type.b,%pc,%d0.w*2), %d0 # index by src type field
+	jmp		(tbl_fp_type.b,%pc,%d0.w*1)
+
+	swbeg		&0x8
+tbl_fp_type:
+	short		load_long	- tbl_fp_type
+	short		load_sgl	- tbl_fp_type
+	short		load_ext	- tbl_fp_type
+	short		load_packed	- tbl_fp_type
+	short		load_word	- tbl_fp_type
+	short		load_dbl	- tbl_fp_type
+	short		load_byte	- tbl_fp_type
+	short		tbl_fp_type	- tbl_fp_type
+
+#########################################
+# load a LONG into %fp0:		#
+#	-number can't fault		#
+#	(1) calc ea			#
+#	(2) read 4 bytes into L_SCR1	#
+#	(3) fmov.l into %fp0		#
+#########################################
+load_long:
+	movq.l		&0x4, %d0		# pass: 4 (bytes)
+	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg
+	beq.b		load_long_immed
+
+	bsr.l		_dmem_read_long		# fetch src operand from memory
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_l		# yes
+
+load_long_cont:
+	fmov.l		%d0, %fp0		# read into %fp0;convert to xprec
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+
+	fbeq.w		load_long_zero		# src op is a ZERO
+	rts
+load_long_zero:
+	mov.b		&ZERO, STAG(%a6)	# set optype tag to ZERO
+	rts
+
+load_long_immed:
+	bsr.l		_imem_read_long		# fetch src operand immed data
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		funimp_iacc		# yes
+	bra.b		load_long_cont
+
+#########################################
+# load a WORD into %fp0:		#
+#	-number can't fault		#
+#	(1) calc ea			#
+#	(2) read 2 bytes into L_SCR1	#
+#	(3) fmov.w into %fp0		#
+#########################################
+load_word:
+	movq.l		&0x2, %d0		# pass: 2 (bytes)
+	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg
+	beq.b		load_word_immed
+
+	bsr.l		_dmem_read_word		# fetch src operand from memory
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_w		# yes
+
+load_word_cont:
+	fmov.w		%d0, %fp0		# read into %fp0;convert to xprec
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+
+	fbeq.w		load_word_zero		# src op is a ZERO
+	rts
+load_word_zero:
+	mov.b		&ZERO, STAG(%a6)	# set optype tag to ZERO
+	rts
+
+load_word_immed:
+	bsr.l		_imem_read_word		# fetch src operand immed data
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		funimp_iacc		# yes
+	bra.b		load_word_cont
+
+#########################################
+# load a BYTE into %fp0:		#
+#	-number can't fault		#
+#	(1) calc ea			#
+#	(2) read 1 byte into L_SCR1	#
+#	(3) fmov.b into %fp0		#
+#########################################
+load_byte:
+	movq.l		&0x1, %d0		# pass: 1 (byte)
+	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg
+	beq.b		load_byte_immed
+
+	bsr.l		_dmem_read_byte		# fetch src operand from memory
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_b		# yes
+
+load_byte_cont:
+	fmov.b		%d0, %fp0		# read into %fp0;convert to xprec
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+
+	fbeq.w		load_byte_zero		# src op is a ZERO
+	rts
+load_byte_zero:
+	mov.b		&ZERO, STAG(%a6)	# set optype tag to ZERO
+	rts
+
+load_byte_immed:
+	bsr.l		_imem_read_word		# fetch src operand immed data
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		funimp_iacc		# yes
+	bra.b		load_byte_cont
+
+#########################################
+# load a SGL into %fp0:			#
+#	-number can't fault		#
+#	(1) calc ea			#
+#	(2) read 4 bytes into L_SCR1	#
+#	(3) fmov.s into %fp0		#
+#########################################
+load_sgl:
+	movq.l		&0x4, %d0		# pass: 4 (bytes)
+	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg
+	beq.b		load_sgl_immed
+
+	bsr.l		_dmem_read_long		# fetch src operand from memory
+	mov.l		%d0, L_SCR1(%a6)	# store src op on stack
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_l		# yes
+
+load_sgl_cont:
+	lea		L_SCR1(%a6), %a0	# pass: ptr to sgl src op
+	bsr.l		set_tag_s		# determine src type tag
+	mov.b		%d0, STAG(%a6)		# save src optype tag on stack
+
+	cmpi.b		%d0, &DENORM		# is it a sgl DENORM?
+	beq.w		get_sgl_denorm		# yes
+
+	cmpi.b		%d0, &SNAN		# is it a sgl SNAN?
+	beq.w		get_sgl_snan		# yes
+
+	fmov.s		L_SCR1(%a6), %fp0	# read into %fp0;convert to xprec
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+	rts
+
+load_sgl_immed:
+	bsr.l		_imem_read_long		# fetch src operand immed data
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		funimp_iacc		# yes
+	bra.b		load_sgl_cont
+
+# must convert sgl denorm format to an Xprec denorm fmt suitable for
+# normalization...
+# %a0 : points to sgl denorm
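+#
+# exponent math (sketch): a sgl denorm has true exponent -126, so the
+# biased xprec exponent below starts at 0x3f81 = 0x3fff - 0x7e and is
+# then reduced by the shift count returned by norm().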
+get_sgl_denorm:
+	clr.w		FP_SRC_EX(%a6)
+	bfextu		(%a0){&9:&23}, %d0	# fetch sgl hi(_mantissa)
+	lsl.l		&0x8, %d0
+	mov.l		%d0, FP_SRC_HI(%a6)	# set ext hi(_mantissa)
+	clr.l		FP_SRC_LO(%a6)		# set ext lo(_mantissa)
+
+	btst		&0x7, (%a0)		# is sgn bit set?
+	beq.b		sgl_dnrm_norm
+	bset		&0x7, FP_SRC_EX(%a6)	# set sgn of xprec value
+
+sgl_dnrm_norm:
+	lea		FP_SRC(%a6), %a0
+	bsr.l		norm			# normalize number
+	mov.w		&0x3f81, %d1		# xprec exp = 0x3f81
+	sub.w		%d0, %d1		# exp = 0x3f81 - shft amt.
+	or.w		%d1, FP_SRC_EX(%a6)	# {sgn,exp}
+
+	mov.b		&NORM, STAG(%a6)	# fix src type tag
+	rts
+
+# convert sgl to ext SNAN
+# %a0 : points to sgl SNAN
+get_sgl_snan:
+	mov.w		&0x7fff, FP_SRC_EX(%a6) # set exp of SNAN
+	bfextu		(%a0){&9:&23}, %d0
+	lsl.l		&0x8, %d0		# extract and insert hi(man)
+	mov.l		%d0, FP_SRC_HI(%a6)
+	clr.l		FP_SRC_LO(%a6)
+
+	btst		&0x7, (%a0)		# see if sign of SNAN is set
+	beq.b		no_sgl_snan_sgn
+	bset		&0x7, FP_SRC_EX(%a6)
+no_sgl_snan_sgn:
+	rts
+
+#########################################
+# load a DBL into %fp0:			#
+#	-number can't fault		#
+#	(1) calc ea			#
+#	(2) read 8 bytes into L_SCR(1,2)#
+#	(3) fmov.d into %fp0		#
+#########################################
+load_dbl:
+	movq.l		&0x8, %d0		# pass: 8 (bytes)
+	bsr.l		_dcalc_ea		# calc <ea>; <ea> in %a0
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg
+	beq.b		load_dbl_immed
+
+	lea		L_SCR1(%a6), %a1	# pass: ptr to input dbl tmp space
+	movq.l		&0x8, %d0		# pass: # bytes to read
+	bsr.l		_dmem_read		# fetch src operand from memory
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_d		# yes
+
+load_dbl_cont:
+	lea		L_SCR1(%a6), %a0	# pass: ptr to input dbl
+	bsr.l		set_tag_d		# determine src type tag
+	mov.b		%d0, STAG(%a6)		# set src optype tag
+
+	cmpi.b		%d0, &DENORM		# is it a dbl DENORM?
+	beq.w		get_dbl_denorm		# yes
+
+	cmpi.b		%d0, &SNAN		# is it a dbl SNAN?
+	beq.w		get_dbl_snan		# yes
+
+	fmov.d		L_SCR1(%a6), %fp0	# read into %fp0;convert to xprec
+	fmovm.x		&0x80, FP_SRC(%a6)	# return src op in FP_SRC
+	rts
+
+load_dbl_immed:
+	lea		L_SCR1(%a6), %a1	# pass: ptr to input dbl tmp space
+	movq.l		&0x8, %d0		# pass: # bytes to read
+	bsr.l		_imem_read		# fetch src operand from memory
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		funimp_iacc		# yes
+	bra.b		load_dbl_cont
+
+# must convert dbl denorm format to an Xprec denorm fmt suitable for
+# normalization...
+# %a0 : loc. of dbl denorm
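+#
+# exponent math (sketch): a dbl denorm has true exponent -1022, so the
+# biased xprec exponent below starts at 0x3c01 = 0x3fff - 0x3fe and is
+# then reduced by the shift count returned by norm().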
+get_dbl_denorm:
+	clr.w		FP_SRC_EX(%a6)
+	bfextu		(%a0){&12:&31}, %d0	# fetch hi(_mantissa)
+	mov.l		%d0, FP_SRC_HI(%a6)
+	bfextu		4(%a0){&11:&21}, %d0	# fetch lo(_mantissa)
+	mov.l		&0xb, %d1
+	lsl.l		%d1, %d0
+	mov.l		%d0, FP_SRC_LO(%a6)
+
+	btst		&0x7, (%a0)		# is sgn bit set?
+	beq.b		dbl_dnrm_norm
+	bset		&0x7, FP_SRC_EX(%a6)	# set sgn of xprec value
+
+dbl_dnrm_norm:
+	lea		FP_SRC(%a6), %a0
+	bsr.l		norm			# normalize number
+	mov.w		&0x3c01, %d1		# xprec exp = 0x3c01
+	sub.w		%d0, %d1		# exp = 0x3c01 - shft amt.
+	or.w		%d1, FP_SRC_EX(%a6)	# {sgn,exp}
+
+	mov.b		&NORM, STAG(%a6)	# fix src type tag
+	rts
+
+# convert dbl to ext SNAN
+# %a0 : points to dbl SNAN
+get_dbl_snan:
+	mov.w		&0x7fff, FP_SRC_EX(%a6) # set exp of SNAN
+
+	bfextu		(%a0){&12:&31}, %d0	# fetch hi(_mantissa)
+	mov.l		%d0, FP_SRC_HI(%a6)
+	bfextu		4(%a0){&11:&21}, %d0	# fetch lo(_mantissa)
+	mov.l		&0xb, %d1
+	lsl.l		%d1, %d0
+	mov.l		%d0, FP_SRC_LO(%a6)
+
+	btst		&0x7, (%a0)		# see if sign of SNAN is set
+	beq.b		no_dbl_snan_sgn
+	bset		&0x7, FP_SRC_EX(%a6)
+no_dbl_snan_sgn:
+	rts
+
+#################################################
+# load a Xprec into %fp0:			#
+#	-number can't fault			#
+#	(1) calc ea				#
+#	(2) read 12 bytes into L_SCR(1,2)	#
+#	(3) fmov.x into %fp0			#
+#################################################
+load_ext:
+	mov.l		&0xc, %d0		# pass: 12 (bytes)
+	bsr.l		_dcalc_ea		# calc <ea>
+
+	lea		FP_SRC(%a6), %a1	# pass: ptr to input ext tmp space
+	mov.l		&0xc, %d0		# pass: # of bytes to read
+	bsr.l		_dmem_read		# fetch src operand from memory
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_x		# yes
+
+	lea		FP_SRC(%a6), %a0	# pass: ptr to src op
+	bsr.l		set_tag_x		# determine src type tag
+
+	cmpi.b		%d0, &UNNORM		# is the src op an UNNORM?
+	beq.b		load_ext_unnorm		# yes
+
+	mov.b		%d0, STAG(%a6)		# store the src optype tag
+	rts
+
+load_ext_unnorm:
+	bsr.l		unnorm_fix		# fix the src UNNORM
+	mov.b		%d0, STAG(%a6)		# store the src optype tag
+	rts
+
+#################################################
+# load a packed into %fp0:			#
+#	-number can't fault			#
+#	(1) calc ea				#
+#	(2) read 12 bytes into L_SCR(1,2,3)	#
+#	(3) fmov.x into %fp0			#
+#################################################
+load_packed:
+	bsr.l		get_packed
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src op
+	bsr.l		set_tag_x		# determine src type tag
+	cmpi.b		%d0,&UNNORM		# is the src op an UNNORM ZERO?
+	beq.b		load_packed_unnorm	# yes
+
+	mov.b		%d0,STAG(%a6)		# store the src optype tag
+	rts
+
+load_packed_unnorm:
+	bsr.l		unnorm_fix		# fix the UNNORM ZERO
+	mov.b		%d0,STAG(%a6)		# store the src optype tag
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fout(): move from fp register to memory or data register	#
+#									#
+# XREF ****************************************************************	#
+#	_round() - needed to create EXOP for sgl/dbl precision		#
+#	norm() - needed to create EXOP for extended precision		#
+#	ovf_res() - create default overflow result for sgl/dbl precision#
+#	unf_res() - create default underflow result for sgl/dbl prec.	#
+#	dst_dbl() - create rounded dbl precision result.		#
+#	dst_sgl() - create rounded sgl precision result.		#
+#	fetch_dreg() - fetch dynamic k-factor reg for packed.		#
+#	bindec() - convert FP binary number to packed number.		#
+#	_mem_write() - write data to memory.				#
+#	_mem_write2() - write data to memory unless supv mode -(a7) exc.#
+#	_dmem_write_{byte,word,long}() - write data to memory.		#
+#	store_dreg_{b,w,l}() - store data to data register file.	#
+#	facc_out_{b,w,l,d,x}() - data access error occurred.		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 : intermediate underflow or overflow result if		#
+#	      OVFL/UNFL occurred for a sgl or dbl operand		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This routine is accessed by many handlers that need to do an	#
+# opclass three move of an operand out to memory.			#
+#	Decode an fmove out (opclass 3) instruction to determine if	#
+# it's b,w,l,s,d,x, or p in size. b,w,l can be stored to either a data	#
+# register or memory. The algorithm uses a standard "fmove" to create	#
+# the rounded result. Also, since exceptions are disabled, this also	#
+# creates the correct OPERR default result if appropriate.		#
+#	For sgl or dbl precision, overflow or underflow can occur. If	#
+# either occurs and is enabled, the EXOP must also be created.		#
+#	For extended precision, the stacked <ea> must be fixed along	#
+# w/ the address index register as appropriate w/ _calc_ea_fout(). If	#
+# the source is a denorm and if underflow is enabled, an EXOP must be	#
+# created.								#
+#	For packed, the k-factor must be fetched from the instruction	#
+# word or a data register. The <ea> must be fixed as w/ extended	#
+# precision. Then, bindec() is called to create the appropriate		#
+# packed result.							#
+#	If at any time an access error is flagged by one of the move-	#
+# to-memory routines, then a special exit must be made so that the	#
+# access error can be handled properly.					#
+#									#
+#########################################################################
+
+	global		fout
+fout:
+	bfextu		EXC_CMDREG(%a6){&3:&3},%d1 # extract dst fmt
+	mov.w		(tbl_fout.b,%pc,%d1.w*2),%a1 # use as index
+	jmp		(tbl_fout.b,%pc,%a1)	# jump to routine
+
+	swbeg		&0x8
+tbl_fout:
+	short		fout_long	-	tbl_fout
+	short		fout_sgl	-	tbl_fout
+	short		fout_ext	-	tbl_fout
+	short		fout_pack	-	tbl_fout
+	short		fout_word	-	tbl_fout
+	short		fout_dbl	-	tbl_fout
+	short		fout_byte	-	tbl_fout
+	short		fout_pack	-	tbl_fout
+
+#################################################################
+# fmove.b out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_byte:
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_byte_denorm	# no
+
+	fmovm.x		SRC(%a0),&0x80		# load value
+
+fout_byte_norm:
+	fmov.l		%d0,%fpcr		# insert rnd prec,mode
+
+	fmov.b		%fp0,%d0		# exec move out w/ correct rnd mode
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch FPSR
+	or.w		%d1,2+USER_FPSR(%a6)	# save new exc,accrued bits
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_byte_dn		# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_byte	# write byte
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_b		# yes
+
+	rts
+
+fout_byte_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_b
+	rts
+
+fout_byte_denorm:
+	mov.l		SRC_EX(%a0),%d1
+	andi.l		&0x80000000,%d1		# keep DENORM sign
+	ori.l		&0x00800000,%d1		# make smallest sgl
+	fmov.s		%d1,%fp0
+	bra.b		fout_byte_norm
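+
+# (the stand-in trick: any fp value this tiny rounds to the same
+# integer (0 or +/-1, depending on rnd mode) and raises the same
+# inexact bits as the true denorm, so the smallest normal sgl with the
+# source's sign is substituted. fout_word_denorm and fout_long_denorm
+# below use the same trick.)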
+
+#################################################################
+# fmove.w out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_word:
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_word_denorm	# no
+
+	fmovm.x		SRC(%a0),&0x80		# load value
+
+fout_word_norm:
+	fmov.l		%d0,%fpcr		# insert rnd prec:mode
+
+	fmov.w		%fp0,%d0		# exec move out w/ correct rnd mode
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch FPSR
+	or.w		%d1,2+USER_FPSR(%a6)	# save new exc,accrued bits
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_word_dn		# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_word	# write word
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_w		# yes
+
+	rts
+
+fout_word_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_w
+	rts
+
+fout_word_denorm:
+	mov.l		SRC_EX(%a0),%d1
+	andi.l		&0x80000000,%d1		# keep DENORM sign
+	ori.l		&0x00800000,%d1		# make smallest sgl
+	fmov.s		%d1,%fp0
+	bra.b		fout_word_norm
+
+#################################################################
+# fmove.l out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_long:
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_long_denorm	# no
+
+	fmovm.x		SRC(%a0),&0x80		# load value
+
+fout_long_norm:
+	fmov.l		%d0,%fpcr		# insert rnd prec:mode
+
+	fmov.l		%fp0,%d0		# exec move out w/ correct rnd mode
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch FPSR
+	or.w		%d1,2+USER_FPSR(%a6)	# save new exc,accrued bits
+
+fout_long_write:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_long_dn		# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	rts
+
+fout_long_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+	rts
+
+fout_long_denorm:
+	mov.l		SRC_EX(%a0),%d1
+	andi.l		&0x80000000,%d1		# keep DENORM sign
+	ori.l		&0x00800000,%d1		# make smallest sgl
+	fmov.s		%d1,%fp0
+	bra.b		fout_long_norm
+
+#################################################################
+# fmove.x out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+# The DENORM causes an Underflow exception.
+fout_ext:
+
+# we copy the extended precision result to FP_SCR0 so that the reserved
+# 16-bit field gets zeroed. we do this since we promise not to disturb
+# what's at SRC(a0).
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	clr.w		2+FP_SCR0_EX(%a6)	# clear reserved field
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	fmovm.x		SRC(%a0),&0x80		# return result
+
+	bsr.l		_calc_ea_fout		# fix stacked <ea>
+
+	mov.l		%a0,%a1			# pass: dst addr
+	lea		FP_SCR0(%a6),%a0	# pass: src addr
+	mov.l		&0xc,%d0		# pass: opsize is 12 bytes
+
+# we must not yet write the extended precision data to the stack
+# in the pre-decrement case from supervisor mode or else we'll corrupt
+# the stack frame. so, leave it in FP_SRC for now and deal with it later...
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	beq.b		fout_ext_a7
+
+	bsr.l		_dmem_write		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_ext_denorm		# no
+	rts
+
+# the number is a DENORM. must set the underflow exception bit
+fout_ext_denorm:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set underflow exc bit
+
+	mov.b		FPCR_ENABLE(%a6),%d0
+	andi.b		&0x0a,%d0		# is UNFL or INEX enabled?
+	bne.b		fout_ext_exc		# yes
+	rts
+
+# we don't want to do the write if the exception occurred in supervisor mode
+# so _mem_write2() handles this for us.
+fout_ext_a7:
+	bsr.l		_mem_write2		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_ext_denorm		# no
+	rts
+
+fout_ext_exc:
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the mantissa
+	neg.w		%d0			# new exp = -(shft amt)
+	andi.w		&0x7fff,%d0
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# keep only old sign
+	or.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+fout_ext_err:
+	mov.l		EXC_A6(%a6),(%a6)	# fix stacked a6
+	bra.l		facc_out_x
+
+#########################################################################
+# fmove.s out ###########################################################
+#########################################################################
+fout_sgl:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	mov.l		%d0,L_SCR3(%a6)		# save rnd prec,mode on stack
+
+#
+# operand is a normalized number. first, we check to see if the move out
+# would cause either an underflow or overflow. these cases are handled
+# separately. otherwise, set the FPCR to the proper rounding mode and
+# execute the move.
+#
+	mov.w		SRC_EX(%a0),%d0		# extract exponent
+	andi.w		&0x7fff,%d0		# strip sign
+
+	cmpi.w		%d0,&SGL_HI		# will operand overflow?
+	bgt.w		fout_sgl_ovfl		# yes; go handle OVFL
+	beq.w		fout_sgl_may_ovfl	# maybe; go handle possible OVFL
+	cmpi.w		%d0,&SGL_LO		# will operand underflow?
+	blt.w		fout_sgl_unfl		# yes; go handle underflow
+
+#
+# NORMs(in range) can be stored out by a simple "fmov.s"
+# Unnormalized inputs can come through this point.
+#
+fout_sgl_exg:
+	fmovm.x		SRC(%a0),&0x80		# fetch fop from stack
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmov.s		%fp0,%d0		# store does convert and round
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.w		%d1,2+USER_FPSR(%a6)	# set possible inex2/ainex
+
+fout_sgl_exg_write:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_sgl_exg_write_dn	# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	rts
+
+fout_sgl_exg_write_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+	rts
+
+#
+# here, we know that the operand would UNFL if moved out to single prec,
+# so, denorm and round and then use generic store single routine to
+# write the value to memory.
+#
+fout_sgl_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.l		%a0,-(%sp)
+
+	clr.l		%d0			# pass: S.F. = 0
+
+	cmpi.b		STAG(%a6),&DENORM	# is src a DENORM?
+	bne.b		fout_sgl_unfl_cont	# let DENORMs fall through
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the DENORM
+
+fout_sgl_unfl_cont:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calc default underflow result
+
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to fop
+	bsr.l		dst_sgl			# convert to single prec
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_sgl_unfl_dn	# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.b		fout_sgl_unfl_chkexc
+
+fout_sgl_unfl_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+
+fout_sgl_unfl_chkexc:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_unfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# it's definitely an overflow so call ovf_res to get the correct answer
+#
+fout_sgl_ovfl:
+	tst.b		3+SRC_HI(%a0)		# is result inexact?
+	bne.b		fout_sgl_ovfl_inex2
+	tst.l		SRC_LO(%a0)		# is result inexact?
+	bne.b		fout_sgl_ovfl_inex2
+	ori.w		&ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+	bra.b		fout_sgl_ovfl_cont
+fout_sgl_ovfl_inex2:
+	ori.w		&ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
+
+fout_sgl_ovfl_cont:
+	mov.l		%a0,-(%sp)
+
+# call ovf_res() w/ sgl prec and the correct rnd mode to create the default
+# overflow result. DON'T save the returned ccodes from ovf_res() since
+# fmove out doesn't alter them.
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	smi		%d1			# set if so
+	mov.l		L_SCR3(%a6),%d0		# pass: sgl prec,rnd mode
+	bsr.l		ovf_res			# calc OVFL result
+	fmovm.x		(%a0),&0x80		# load default overflow result
+	fmov.s		%fp0,%d0		# store to single
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_sgl_ovfl_dn	# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.b		fout_sgl_ovfl_chkexc
+
+fout_sgl_ovfl_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+
+fout_sgl_ovfl_chkexc:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_ovfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# move out MAY overflow:
+# (1) force the exp to 0x3fff
+# (2) do a move w/ appropriate rnd mode
+# (3) if exp still equals zero, then insert original exponent
+#	for the correct result.
+#     if exp now equals one, then it overflowed so call ovf_res.
+#
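+# (the fabs/fcmp pair below implements step (3): with the exponent
+# forced to the bias, the operand lies in [1.0, 2.0) before rounding,
+# so the rounded |result| reaches 2.0 only if rounding bumped the
+# exponent, i.e. the real move out would overflow. fout_dbl_may_ovfl
+# uses the same check.)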
+fout_sgl_may_ovfl:
+	mov.w		SRC_EX(%a0),%d1		# fetch current sign
+	andi.w		&0x8000,%d1		# keep it,clear exp
+	ori.w		&0x3fff,%d1		# insert exp = 0
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert scaled exp
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# force fop to be rounded
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# need absolute value
+	fcmp.b		%fp0,&0x2		# did exponent increase?
+	fblt.w		fout_sgl_exg		# no; go finish NORM
+	bra.w		fout_sgl_ovfl		# yes; go handle overflow
+
+################
+
+fout_sd_exc_unfl:
+	mov.l		(%sp)+,%a0
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	cmpi.b		STAG(%a6),&DENORM	# was src a DENORM?
+	bne.b		fout_sd_exc_cont	# no
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm
+	neg.l		%d0
+	andi.w		&0x7fff,%d0
+	bfins		%d0,FP_SCR0_EX(%a6){&1:&15}
+	bra.b		fout_sd_exc_cont
+
+fout_sd_exc:
+fout_sd_exc_ovfl:
+	mov.l		(%sp)+,%a0		# restore a0
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+fout_sd_exc_cont:
+	bclr		&0x7,FP_SCR0_EX(%a6)	# clear sign bit
+	sne.b		2+FP_SCR0_EX(%a6)	# set internal sign bit
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to DENORM
+
+	mov.b		3+L_SCR3(%a6),%d1	# fetch rnd prec,mode
+	lsr.b		&0x4,%d1
+	andi.w		&0x0c,%d1		# extract rnd prec
+	swap		%d1			# prec to upper word
+	mov.b		3+L_SCR3(%a6),%d1	# refetch rnd prec,mode
+	lsr.b		&0x4,%d1
+	andi.w		&0x03,%d1		# extract rnd mode
+	clr.l		%d0			# pass: zero g,r,s
+	bsr.l		_round			# round the DENORM
+
+	tst.b		2+FP_SCR0_EX(%a6)	# is EXOP negative?
+	beq.b		fout_sd_exc_done	# no
+	bset		&0x7,FP_SCR0_EX(%a6)	# yes
+
+fout_sd_exc_done:
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#################################################################
+# fmove.d out ###################################################
+#################################################################
+fout_dbl:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+	mov.l		%d0,L_SCR3(%a6)		# save rnd prec,mode on stack
+
+#
+# operand is a normalized number. first, we check to see if the move out
+# would cause either an underflow or overflow. these cases are handled
+# separately. otherwise, set the FPCR to the proper rounding mode and
+# execute the move.
+#
+	mov.w		SRC_EX(%a0),%d0		# extract exponent
+	andi.w		&0x7fff,%d0		# strip sign
+
+	cmpi.w		%d0,&DBL_HI		# will operand overflow?
+	bgt.w		fout_dbl_ovfl		# yes; go handle OVFL
+	beq.w		fout_dbl_may_ovfl	# maybe; go handle possible OVFL
+	cmpi.w		%d0,&DBL_LO		# will operand underflow?
+	blt.w		fout_dbl_unfl		# yes; go handle underflow
+
+#
+# NORMs(in range) can be stored out by a simple "fmov.d"
+# Unnormalized inputs can come through this point.
+#
+fout_dbl_exg:
+	fmovm.x		SRC(%a0),&0x80		# fetch fop from stack
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmov.d		%fp0,L_SCR1(%a6)	# store does convert and round
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d0		# save FPSR
+
+	or.w		%d0,2+USER_FPSR(%a6)	# set possible inex2/ainex
+
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	lea		L_SCR1(%a6),%a0		# pass: src addr
+	movq.l		&0x8,%d0		# pass: opsize is 8 bytes
+	bsr.l		_dmem_write		# store dbl fop to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	rts					# no; so we're finished
+
+#
+# here, we know that the operand would UNFL if moved out to double prec,
+# so, denorm and round and then use generic store double routine to
+# write the value to memory.
+#
+fout_dbl_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.l		%a0,-(%sp)
+
+	clr.l		%d0			# pass: S.F. = 0
+
+	cmpi.b		STAG(%a6),&DENORM	# is src a DENORM?
+	bne.b		fout_dbl_unfl_cont	# let DENORMs fall through
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the DENORM
+
+fout_dbl_unfl_cont:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calc default underflow result
+
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to fop
+	bsr.l		dst_dbl			# convert to double prec
+	mov.l		%d0,L_SCR1(%a6)
+	mov.l		%d1,L_SCR2(%a6)
+
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	lea		L_SCR1(%a6),%a0		# pass: src addr
+	movq.l		&0x8,%d0		# pass: opsize is 8 bytes
+	bsr.l		_dmem_write		# store dbl fop to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_unfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# it's definitely an overflow so call ovf_res to get the correct answer
+#
+fout_dbl_ovfl:
+	mov.w		2+SRC_LO(%a0),%d0
+	andi.w		&0x7ff,%d0
+	bne.b		fout_dbl_ovfl_inex2
+
+	ori.w		&ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+	bra.b		fout_dbl_ovfl_cont
+fout_dbl_ovfl_inex2:
+	ori.w		&ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
+
+fout_dbl_ovfl_cont:
+	mov.l		%a0,-(%sp)
+
+# call ovf_res() w/ dbl prec and the correct rnd mode to create the default
+# overflow result. DON'T save the returned ccodes from ovf_res() since
+# fmove out doesn't alter them.
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	smi		%d1			# set if so
+	mov.l		L_SCR3(%a6),%d0		# pass: dbl prec,rnd mode
+	bsr.l		ovf_res			# calc OVFL result
+	fmovm.x		(%a0),&0x80		# load default overflow result
+	fmov.d		%fp0,L_SCR1(%a6)	# store to double
+
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	lea		L_SCR1(%a6),%a0		# pass: src addr
+	movq.l		&0x8,%d0		# pass: opsize is 8 bytes
+	bsr.l		_dmem_write		# store dbl fop to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_ovfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# move out MAY overflow:
+# (1) force the exp to 0x3fff
+# (2) do a move w/ appropriate rnd mode
+# (3) if exp still equals zero, then insert original exponent
+#	for the correct result.
+#     if exp now equals one, then it overflowed so call ovf_res.
+#
+fout_dbl_may_ovfl:
+	mov.w		SRC_EX(%a0),%d1		# fetch current sign
+	andi.w		&0x8000,%d1		# keep it,clear exp
+	ori.w		&0x3fff,%d1		# insert exp = 0
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert scaled exp
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# force fop to be rounded
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# need absolute value
+	fcmp.b		%fp0,&0x2		# did exponent increase?
+	fblt.w		fout_dbl_exg		# no; go finish NORM
+	bra.w		fout_dbl_ovfl		# yes; go handle overflow
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	dst_dbl(): create double precision value from extended prec.	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to source operand in extended precision		#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = hi(double precision result)				#
+#	d1 = lo(double precision result)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#  Changes extended precision to double precision.			#
+#  Note: no attempt is made to round the extended value to double.	#
+#	dbl_sign = ext_sign						#
+#	dbl_exp = ext_exp - $3fff(ext bias) + $3ff(dbl bias)		#
+#	get rid of ext integer bit					#
+#	dbl_mant = ext_mant{62:11}					#
+#									#
+#		---------------   ---------------    ---------------	#
+#  extended ->  |s|    exp    |   |1| ms mant   |    | ls mant     |	#
+#		---------------   ---------------    ---------------	#
+#		 95	    64    63 62	      32      31     11	  0	#
+#				     |			     |		#
+#				     |			     |		#
+#				     |			     |		#
+#			             v			     v		#
+#			      ---------------   ---------------		#
+#  double   ->		      |s|exp| mant  |   |  mant       |		#
+#			      ---------------   ---------------		#
+#			      63     51   32   31	       0	#
+#									#
+#########################################################################
+
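+# worked example (sketch): +1.0 extended = {0x3fff, 0x80000000, 0x0}.
+# exp: 0x3fff - 0x3fff + 0x3ff = 0x3ff, shifted into bits 62:52 of the
+# result; the fraction comes across as zero, giving d0 = 0x3ff00000,
+# d1 = 0x00000000, i.e. IEEE double +1.0.
+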
+dst_dbl:
+	clr.l		%d0			# clear d0
+	mov.w		FTEMP_EX(%a0),%d0	# get exponent
+	subi.w		&EXT_BIAS,%d0		# subtract extended precision bias
+	addi.w		&DBL_BIAS,%d0		# add double precision bias
+	tst.b		FTEMP_HI(%a0)		# is number a denorm?
+	bmi.b		dst_get_dupper		# no
+	subq.w		&0x1,%d0		# yes; denorm bias = DBL_BIAS - 1
+dst_get_dupper:
+	swap		%d0			# d0 now in upper word
+	lsl.l		&0x4,%d0		# d0 in proper place for dbl prec exp
+	tst.b		FTEMP_EX(%a0)		# test sign
+	bpl.b		dst_get_dman		# if positive, go process mantissa
+	bset		&0x1f,%d0		# if negative, set sign
+dst_get_dman:
+	mov.l		FTEMP_HI(%a0),%d1	# get ms mantissa
+	bfextu		%d1{&1:&20},%d1		# get upper 20 bits of ms
+	or.l		%d1,%d0			# put these bits in ms word of double
+	mov.l		%d0,L_SCR1(%a6)		# put the new exp back on the stack
+	mov.l		FTEMP_HI(%a0),%d1	# get ms mantissa
+	mov.l		&21,%d0			# load shift count
+	lsl.l		%d0,%d1			# put lower 11 bits in upper bits
+	mov.l		%d1,L_SCR2(%a6)		# build lower lword in memory
+	mov.l		FTEMP_LO(%a0),%d1	# get ls mantissa
+	bfextu		%d1{&0:&21},%d0		# get ls 21 bits of double
+	mov.l		L_SCR2(%a6),%d1
+	or.l		%d0,%d1			# put them in double result
+	mov.l		L_SCR1(%a6),%d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	dst_sgl(): create single precision value from extended prec	#
+#									#
+# XREF ****************************************************************	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to source operand in extended precision		#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = single precision result					#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+# Changes extended precision to single precision.			#
+#	sgl_sign = ext_sign						#
+#	sgl_exp = ext_exp - $3fff(ext bias) + $7f(sgl bias)		#
+#	get rid of ext integer bit					#
+#	sgl_mant = ext_mant{62:40}					#
+#									#
+#		---------------   ---------------    ---------------	#
+#  extended ->  |s|    exp    |   |1| ms mant   |    | ls mant     |	#
+#		---------------   ---------------    ---------------	#
+#		 95	    64    63 62	   40 32      31     12	  0	#
+#				     |	   |				#
+#				     |	   |				#
+#				     |	   |				#
+#			             v     v				#
+#			      ---------------				#
+#  single   ->		      |s|exp| mant  |				#
+#			      ---------------				#
+#			      31     22     0				#
+#									#
+#########################################################################
+
+dst_sgl:
+	clr.l		%d0
+	mov.w		FTEMP_EX(%a0),%d0	# get exponent
+	subi.w		&EXT_BIAS,%d0		# subtract extended precision bias
+	addi.w		&SGL_BIAS,%d0		# add single precision bias
+	tst.b		FTEMP_HI(%a0)		# is number a denorm?
+	bmi.b		dst_get_supper		# no
+	subq.w		&0x1,%d0		# yes; denorm bias = SGL_BIAS - 1
+dst_get_supper:
+	swap		%d0			# put exp in upper word of d0
+	lsl.l		&0x7,%d0		# shift it into single exp bits
+	tst.b		FTEMP_EX(%a0)		# test sign
+	bpl.b		dst_get_sman		# if positive, continue
+	bset		&0x1f,%d0		# if negative, put in sign first
+dst_get_sman:
+	mov.l		FTEMP_HI(%a0),%d1	# get ms mantissa
+	andi.l		&0x7fffff00,%d1		# get upper 23 bits of ms
+	lsr.l		&0x8,%d1		# and put them flush right
+	or.l		%d1,%d0			# put these bits in ms word of single
+	rts
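+
+# illustrative walk-through (an editorial sketch, not part of the original
+# Motorola code): assume the input is -2.0 in extended precision, i.e.
+# FTEMP_EX = $c000, FTEMP_HI = $80000000, FTEMP_LO = $00000000.
+#	exponent: $4000 - $3fff + $7f = $80; swap and lsl.l &7 place it
+#		  in bits 30:23, and bset &31 adds the sign bit
+#	mantissa: zero once the j-bit is stripped
+# so the routine should return d0 = $c0000000, which is -2.0 in IEEE
+# single precision format.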
+
+##############################################################################
+fout_pack:
+	bsr.l		_calc_ea_fout		# fetch the <ea>
+	mov.l		%a0,-(%sp)
+
+	mov.b		STAG(%a6),%d0		# fetch input type
+	bne.w		fout_pack_not_norm	# input is not NORM
+
+fout_pack_norm:
+	btst		&0x4,EXC_CMDREG(%a6)	# static or dynamic?
+	beq.b		fout_pack_s		# static
+
+fout_pack_d:
+	mov.b		1+EXC_CMDREG(%a6),%d1	# fetch dynamic reg
+	lsr.b		&0x4,%d1
+	andi.w		&0x7,%d1
+
+	bsr.l		fetch_dreg		# fetch Dn w/ k-factor
+
+	bra.b		fout_pack_type
+fout_pack_s:
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch static field
+
+fout_pack_type:
+	bfexts		%d0{&25:&7},%d0		# extract k-factor
+	mov.l		%d0,-(%sp)
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to input
+
+# bindec is currently scrambling FP_SRC for denorm inputs.
+# we'll have to change this, but for now, tough luck!!!
+	bsr.l		bindec			# convert xprec to packed
+
+#	andi.l		&0xcfff000f,FP_SCR0(%a6) # clear unused fields
+	andi.l		&0xcffff00f,FP_SCR0(%a6) # clear unused fields
+
+	mov.l		(%sp)+,%d0
+
+	tst.b		3+FP_SCR0_EX(%a6)
+	bne.b		fout_pack_set
+	tst.l		FP_SCR0_HI(%a6)
+	bne.b		fout_pack_set
+	tst.l		FP_SCR0_LO(%a6)
+	bne.b		fout_pack_set
+
+# add the extra condition that we should only zero the exponent if the
+# k-factor was zero, too
+	tst.l		%d0
+	bne.b		fout_pack_set
+# "mantissa" is all zero which means that the answer is zero. but, the '040
+# algorithm allows the exponent to be non-zero. the 881/2 do not. therefore,
+# if the mantissa is zero, I will zero the exponent, too.
+# the question now is whether the exponent's sign bit is allowed to be non-zero
+# for a zero, also...
+	andi.w		&0xf000,FP_SCR0(%a6)
+
+fout_pack_set:
+
+	lea		FP_SCR0(%a6),%a0	# pass: src addr
+
+fout_pack_write:
+	mov.l		(%sp)+,%a1		# pass: dst addr
+	mov.l		&0xc,%d0		# pass: opsize is 12 bytes
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	beq.b		fout_pack_a7
+
+	bsr.l		_dmem_write		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	rts
+
+# we don't want to do the write if the exception occurred in supervisor mode
+# so _mem_write2() handles this for us.
+fout_pack_a7:
+	bsr.l		_mem_write2		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	rts
+
+fout_pack_not_norm:
+	cmpi.b		%d0,&DENORM		# is it a DENORM?
+	beq.w		fout_pack_norm		# yes
+	lea		FP_SRC(%a6),%a0
+	clr.w		2+FP_SRC_EX(%a6)
+	cmpi.b		%d0,&SNAN		# is it an SNAN?
+	beq.b		fout_pack_snan		# yes
+	bra.b		fout_pack_write		# no
+
+fout_pack_snan:
+	ori.w		&snaniop2_mask,FPSR_EXCEPT(%a6) # set SNAN/AIOP
+	bset		&0x6,FP_SRC_HI(%a6)	# set snan bit
+	bra.b		fout_pack_write
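+
+# note on the k-factor above (editorial, not part of the original code):
+# the bfexts of %d0{&25:&7} sign-extends the low seven bits of the field,
+# so k ranges from -64 to +63. for example, a field of %1111101 yields
+# k = -3, which asks bindec for a packed result with three digits to the
+# right of the decimal point, per the 68881/040 k-factor convention.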
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fetch_dreg(): fetch register according to index in d1		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d1 = index of register to fetch from				#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of register fetched					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1 which can range from zero	#
+# to fifteen, load the corresponding register file value (where		#
+# address register indexes start at 8). D0/D1/A0/A1/A6/A7 are on the	#
+# stack. The rest should still be in their original places.		#
+#									#
+#########################################################################
+
+# this routine leaves d1 intact for subsequent store_dreg calls.
+	global		fetch_dreg
+fetch_dreg:
+	mov.w		(tbl_fdreg.b,%pc,%d1.w*2),%d0
+	jmp		(tbl_fdreg.b,%pc,%d0.w*1)
+
+tbl_fdreg:
+	short		fdreg0 - tbl_fdreg
+	short		fdreg1 - tbl_fdreg
+	short		fdreg2 - tbl_fdreg
+	short		fdreg3 - tbl_fdreg
+	short		fdreg4 - tbl_fdreg
+	short		fdreg5 - tbl_fdreg
+	short		fdreg6 - tbl_fdreg
+	short		fdreg7 - tbl_fdreg
+	short		fdreg8 - tbl_fdreg
+	short		fdreg9 - tbl_fdreg
+	short		fdrega - tbl_fdreg
+	short		fdregb - tbl_fdreg
+	short		fdregc - tbl_fdreg
+	short		fdregd - tbl_fdreg
+	short		fdrege - tbl_fdreg
+	short		fdregf - tbl_fdreg
+
+fdreg0:
+	mov.l		EXC_DREGS+0x0(%a6),%d0
+	rts
+fdreg1:
+	mov.l		EXC_DREGS+0x4(%a6),%d0
+	rts
+fdreg2:
+	mov.l		%d2,%d0
+	rts
+fdreg3:
+	mov.l		%d3,%d0
+	rts
+fdreg4:
+	mov.l		%d4,%d0
+	rts
+fdreg5:
+	mov.l		%d5,%d0
+	rts
+fdreg6:
+	mov.l		%d6,%d0
+	rts
+fdreg7:
+	mov.l		%d7,%d0
+	rts
+fdreg8:
+	mov.l		EXC_DREGS+0x8(%a6),%d0
+	rts
+fdreg9:
+	mov.l		EXC_DREGS+0xc(%a6),%d0
+	rts
+fdrega:
+	mov.l		%a2,%d0
+	rts
+fdregb:
+	mov.l		%a3,%d0
+	rts
+fdregc:
+	mov.l		%a4,%d0
+	rts
+fdregd:
+	mov.l		%a5,%d0
+	rts
+fdrege:
+	mov.l		(%a6),%d0
+	rts
+fdregf:
+	mov.l		EXC_A7(%a6),%d0
+	rts
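+
+# dispatch sketch (an illustration, not part of the original code): for
+# d1 = 0x3, the mov.w above fetches the word at tbl_fdreg+6, which holds
+# the offset (fdreg3 - tbl_fdreg); the jmp then lands on fdreg3 and d0 is
+# loaded from %d3. d0/d1/a0/a1 and the a6/a7 values are instead read from
+# their stack frame copies, as the ALGORITHM note above explains.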
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_dreg_l(): store longword to data register specified by d1	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = longword value to store					#
+#	d1 = index of data register to store to				#
+#									#
+# OUTPUT **************************************************************	#
+#	(data register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1, store the longword value	#
+# in d0 to the corresponding data register. D0/D1 are on the stack	#
+# while the rest are in their initial places.				#
+#									#
+#########################################################################
+
+	global		store_dreg_l
+store_dreg_l:
+	mov.w		(tbl_sdregl.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_sdregl.b,%pc,%d1.w*1)
+
+tbl_sdregl:
+	short		sdregl0 - tbl_sdregl
+	short		sdregl1 - tbl_sdregl
+	short		sdregl2 - tbl_sdregl
+	short		sdregl3 - tbl_sdregl
+	short		sdregl4 - tbl_sdregl
+	short		sdregl5 - tbl_sdregl
+	short		sdregl6 - tbl_sdregl
+	short		sdregl7 - tbl_sdregl
+
+sdregl0:
+	mov.l		%d0,EXC_DREGS+0x0(%a6)
+	rts
+sdregl1:
+	mov.l		%d0,EXC_DREGS+0x4(%a6)
+	rts
+sdregl2:
+	mov.l		%d0,%d2
+	rts
+sdregl3:
+	mov.l		%d0,%d3
+	rts
+sdregl4:
+	mov.l		%d0,%d4
+	rts
+sdregl5:
+	mov.l		%d0,%d5
+	rts
+sdregl6:
+	mov.l		%d0,%d6
+	rts
+sdregl7:
+	mov.l		%d0,%d7
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_dreg_w(): store word to data register specified by d1	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = word value to store					#
+#	d1 = index of data register to store to				#
+#									#
+# OUTPUT **************************************************************	#
+#	(data register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1, store the word value	#
+# in d0 to the corresponding data register. D0/D1 are on the stack	#
+# while the rest are in their initial places.				#
+#									#
+#########################################################################
+
+	global		store_dreg_w
+store_dreg_w:
+	mov.w		(tbl_sdregw.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_sdregw.b,%pc,%d1.w*1)
+
+tbl_sdregw:
+	short		sdregw0 - tbl_sdregw
+	short		sdregw1 - tbl_sdregw
+	short		sdregw2 - tbl_sdregw
+	short		sdregw3 - tbl_sdregw
+	short		sdregw4 - tbl_sdregw
+	short		sdregw5 - tbl_sdregw
+	short		sdregw6 - tbl_sdregw
+	short		sdregw7 - tbl_sdregw
+
+sdregw0:
+	mov.w		%d0,2+EXC_DREGS+0x0(%a6)
+	rts
+sdregw1:
+	mov.w		%d0,2+EXC_DREGS+0x4(%a6)
+	rts
+sdregw2:
+	mov.w		%d0,%d2
+	rts
+sdregw3:
+	mov.w		%d0,%d3
+	rts
+sdregw4:
+	mov.w		%d0,%d4
+	rts
+sdregw5:
+	mov.w		%d0,%d5
+	rts
+sdregw6:
+	mov.w		%d0,%d6
+	rts
+sdregw7:
+	mov.w		%d0,%d7
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_dreg_b(): store byte to data register specified by d1	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = byte value to store					#
+#	d1 = index of data register to store to				#
+#									#
+# OUTPUT **************************************************************	#
+#	(data register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1, store the byte value	#
+# in d0 to the corresponding data register. D0/D1 are on the stack	#
+# while the rest are in their initial places.				#
+#									#
+#########################################################################
+
+	global		store_dreg_b
+store_dreg_b:
+	mov.w		(tbl_sdregb.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_sdregb.b,%pc,%d1.w*1)
+
+tbl_sdregb:
+	short		sdregb0 - tbl_sdregb
+	short		sdregb1 - tbl_sdregb
+	short		sdregb2 - tbl_sdregb
+	short		sdregb3 - tbl_sdregb
+	short		sdregb4 - tbl_sdregb
+	short		sdregb5 - tbl_sdregb
+	short		sdregb6 - tbl_sdregb
+	short		sdregb7 - tbl_sdregb
+
+sdregb0:
+	mov.b		%d0,3+EXC_DREGS+0x0(%a6)
+	rts
+sdregb1:
+	mov.b		%d0,3+EXC_DREGS+0x4(%a6)
+	rts
+sdregb2:
+	mov.b		%d0,%d2
+	rts
+sdregb3:
+	mov.b		%d0,%d3
+	rts
+sdregb4:
+	mov.b		%d0,%d4
+	rts
+sdregb5:
+	mov.b		%d0,%d5
+	rts
+sdregb6:
+	mov.b		%d0,%d6
+	rts
+sdregb7:
+	mov.b		%d0,%d7
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	inc_areg(): increment an address register by the value in d0	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = amount to increment by					#
+#	d1 = index of address register to increment			#
+#									#
+# OUTPUT **************************************************************	#
+#	(address register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Typically used for an instruction w/ a post-increment <ea>,	#
+# this routine adds the increment value in d0 to the address register	#
+# specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside	#
+# in their original places.						#
+#	For a7, if the increment amount is one, then we have to		#
+# increment by two. For any a7 update, set the mia7_flag so that if	#
+# an access error exception occurs later in emulation, this address	#
+# register update can be undone.					#
+#									#
+#########################################################################
+
+	global		inc_areg
+inc_areg:
+	mov.w		(tbl_iareg.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_iareg.b,%pc,%d1.w*1)
+
+tbl_iareg:
+	short		iareg0 - tbl_iareg
+	short		iareg1 - tbl_iareg
+	short		iareg2 - tbl_iareg
+	short		iareg3 - tbl_iareg
+	short		iareg4 - tbl_iareg
+	short		iareg5 - tbl_iareg
+	short		iareg6 - tbl_iareg
+	short		iareg7 - tbl_iareg
+
+iareg0:	add.l		%d0,EXC_DREGS+0x8(%a6)
+	rts
+iareg1:	add.l		%d0,EXC_DREGS+0xc(%a6)
+	rts
+iareg2:	add.l		%d0,%a2
+	rts
+iareg3:	add.l		%d0,%a3
+	rts
+iareg4:	add.l		%d0,%a4
+	rts
+iareg5:	add.l		%d0,%a5
+	rts
+iareg6:	add.l		%d0,(%a6)
+	rts
+iareg7:	mov.b		&mia7_flg,SPCOND_FLG(%a6)
+	cmpi.b		%d0,&0x1
+	beq.b		iareg7b
+	add.l		%d0,EXC_A7(%a6)
+	rts
+iareg7b:
+	addq.l		&0x2,EXC_A7(%a6)
+	rts
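+
+# illustrative walk-through (not part of the original code): a byte-size
+# post-increment through (a7)+ reaches here with d0 = 0x1, d1 = 0x7. the
+# routine records mia7_flg in SPCOND_FLG and then bumps EXC_A7 by two
+# rather than one, keeping the stack pointer word-aligned just as the
+# hardware does for byte operations on a7.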
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	dec_areg(): decrement an address register by the value in d0	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = amount to decrement by					#
+#	d1 = index of address register to decrement			#
+#									#
+# OUTPUT **************************************************************	#
+#	(address register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Typically used for an instruction w/ a pre-decrement <ea>,	#
+# this routine subtracts the decrement value in d0 from the address	#
+# register specified by d1. A0/A1/A6/A7 reside on the stack. The rest	#
+# reside in their original places.					#
+#	For a7, if the decrement amount is one, then we have to		#
+# decrement by two. For any a7 update, set the mda7_flag so that if	#
+# an access error exception occurs later in emulation, this address	#
+# register update can be undone.					#
+#									#
+#########################################################################
+
+	global		dec_areg
+dec_areg:
+	mov.w		(tbl_dareg.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_dareg.b,%pc,%d1.w*1)
+
+tbl_dareg:
+	short		dareg0 - tbl_dareg
+	short		dareg1 - tbl_dareg
+	short		dareg2 - tbl_dareg
+	short		dareg3 - tbl_dareg
+	short		dareg4 - tbl_dareg
+	short		dareg5 - tbl_dareg
+	short		dareg6 - tbl_dareg
+	short		dareg7 - tbl_dareg
+
+dareg0:	sub.l		%d0,EXC_DREGS+0x8(%a6)
+	rts
+dareg1:	sub.l		%d0,EXC_DREGS+0xc(%a6)
+	rts
+dareg2:	sub.l		%d0,%a2
+	rts
+dareg3:	sub.l		%d0,%a3
+	rts
+dareg4:	sub.l		%d0,%a4
+	rts
+dareg5:	sub.l		%d0,%a5
+	rts
+dareg6:	sub.l		%d0,(%a6)
+	rts
+dareg7:	mov.b		&mda7_flg,SPCOND_FLG(%a6)
+	cmpi.b		%d0,&0x1
+	beq.b		dareg7b
+	sub.l		%d0,EXC_A7(%a6)
+	rts
+dareg7b:
+	subq.l		&0x2,EXC_A7(%a6)
+	rts
+
+##############################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	load_fpn1(): load FP register value into FP_SRC(a6).		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = index of FP register to load				#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SRC(a6) = value loaded from FP register file			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Using the index in d0, load FP_SRC(a6) with a number from the	#
+# FP register file.							#
+#									#
+#########################################################################
+
+	global		load_fpn1
+load_fpn1:
+	mov.w		(tbl_load_fpn1.b,%pc,%d0.w*2), %d0
+	jmp		(tbl_load_fpn1.b,%pc,%d0.w*1)
+
+tbl_load_fpn1:
+	short		load_fpn1_0 - tbl_load_fpn1
+	short		load_fpn1_1 - tbl_load_fpn1
+	short		load_fpn1_2 - tbl_load_fpn1
+	short		load_fpn1_3 - tbl_load_fpn1
+	short		load_fpn1_4 - tbl_load_fpn1
+	short		load_fpn1_5 - tbl_load_fpn1
+	short		load_fpn1_6 - tbl_load_fpn1
+	short		load_fpn1_7 - tbl_load_fpn1
+
+load_fpn1_0:
+	mov.l		0+EXC_FP0(%a6), 0+FP_SRC(%a6)
+	mov.l		4+EXC_FP0(%a6), 4+FP_SRC(%a6)
+	mov.l		8+EXC_FP0(%a6), 8+FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_1:
+	mov.l		0+EXC_FP1(%a6), 0+FP_SRC(%a6)
+	mov.l		4+EXC_FP1(%a6), 4+FP_SRC(%a6)
+	mov.l		8+EXC_FP1(%a6), 8+FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_2:
+	fmovm.x		&0x20, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_3:
+	fmovm.x		&0x10, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_4:
+	fmovm.x		&0x08, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_5:
+	fmovm.x		&0x04, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_6:
+	fmovm.x		&0x02, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_7:
+	fmovm.x		&0x01, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
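+
+# note on the fmovm.x masks above (editorial, inferred from the table
+# itself): the static register list for fmovm.x to memory is encoded
+# msb-first, so &0x80 would select fp0, &0x20 selects fp2, ... &0x01
+# selects fp7. fp0/fp1 are not moved with fmovm.x here because their
+# values live in the EXC_FP0/EXC_FP1 save areas, so those two cases
+# copy the value longword by longword instead.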
+
+#############################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	load_fpn2(): load FP register value into FP_DST(a6).		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = index of FP register to load				#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_DST(a6) = value loaded from FP register file			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Using the index in d0, load FP_DST(a6) with a number from the	#
+# FP register file.							#
+#									#
+#########################################################################
+
+	global		load_fpn2
+load_fpn2:
+	mov.w		(tbl_load_fpn2.b,%pc,%d0.w*2), %d0
+	jmp		(tbl_load_fpn2.b,%pc,%d0.w*1)
+
+tbl_load_fpn2:
+	short		load_fpn2_0 - tbl_load_fpn2
+	short		load_fpn2_1 - tbl_load_fpn2
+	short		load_fpn2_2 - tbl_load_fpn2
+	short		load_fpn2_3 - tbl_load_fpn2
+	short		load_fpn2_4 - tbl_load_fpn2
+	short		load_fpn2_5 - tbl_load_fpn2
+	short		load_fpn2_6 - tbl_load_fpn2
+	short		load_fpn2_7 - tbl_load_fpn2
+
+load_fpn2_0:
+	mov.l		0+EXC_FP0(%a6), 0+FP_DST(%a6)
+	mov.l		4+EXC_FP0(%a6), 4+FP_DST(%a6)
+	mov.l		8+EXC_FP0(%a6), 8+FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_1:
+	mov.l		0+EXC_FP1(%a6), 0+FP_DST(%a6)
+	mov.l		4+EXC_FP1(%a6), 4+FP_DST(%a6)
+	mov.l		8+EXC_FP1(%a6), 8+FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_2:
+	fmovm.x		&0x20, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_3:
+	fmovm.x		&0x10, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_4:
+	fmovm.x		&0x08, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_5:
+	fmovm.x		&0x04, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_6:
+	fmovm.x		&0x02, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_7:
+	fmovm.x		&0x01, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+
+#############################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_fpreg(): store an fp value to the fpreg designated d0.	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	fp0 = extended precision value to store				#
+#	d0  = index of floating-point register				#
+#									#
+# OUTPUT **************************************************************	#
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Store the value in fp0 to the FP register designated by the	#
+# value in d0. The FP number can be DENORM or SNAN so we have to be	#
+# careful that we don't take an exception here.				#
+#									#
+#########################################################################
+
+	global		store_fpreg
+store_fpreg:
+	mov.w		(tbl_store_fpreg.b,%pc,%d0.w*2), %d0
+	jmp		(tbl_store_fpreg.b,%pc,%d0.w*1)
+
+tbl_store_fpreg:
+	short		store_fpreg_0 - tbl_store_fpreg
+	short		store_fpreg_1 - tbl_store_fpreg
+	short		store_fpreg_2 - tbl_store_fpreg
+	short		store_fpreg_3 - tbl_store_fpreg
+	short		store_fpreg_4 - tbl_store_fpreg
+	short		store_fpreg_5 - tbl_store_fpreg
+	short		store_fpreg_6 - tbl_store_fpreg
+	short		store_fpreg_7 - tbl_store_fpreg
+
+store_fpreg_0:
+	fmovm.x		&0x80, EXC_FP0(%a6)
+	rts
+store_fpreg_1:
+	fmovm.x		&0x80, EXC_FP1(%a6)
+	rts
+store_fpreg_2:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x20
+	rts
+store_fpreg_3:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x10
+	rts
+store_fpreg_4:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x08
+	rts
+store_fpreg_5:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x04
+	rts
+store_fpreg_6:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x02
+	rts
+store_fpreg_7:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x01
+	rts
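+
+# design note (editorial): for fp2-fp7 the value, which arrives in fp0,
+# is pushed onto the stack and popped into the destination register with
+# fmovm.x in both directions. fmovm moves the raw bit image without
+# arithmetic, so, per the ALGORITHM note above, a DENORM or SNAN operand
+# should not raise an exception the way an arithmetic fmove could.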
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_denorm(): denormalize an intermediate result			#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT *************************************************************** #
+#	a0 = points to the operand to be denormalized			#
+#		(in the internal extended format)			#
+#									#
+#	d0 = rounding precision						#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = pointer to the denormalized result				#
+#		(in the internal extended format)			#
+#									#
+#	d0 = guard,round,sticky						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the exponent underflow threshold for the given	#
+# precision, shift the mantissa bits to the right in order to raise	#
+# the exponent of the operand to the threshold value. While shifting	#
+# the mantissa bits right, maintain the value of the guard, round, and	#
+# sticky bits.								#
+# other notes:								#
+#	(1) _denorm() is called by the underflow routines		#
+#	(2) _denorm() does NOT affect the status register		#
+#									#
+#########################################################################
+
+#
+# table of exponent threshold values for each precision
+#
+tbl_thresh:
+	short		0x0
+	short		sgl_thresh
+	short		dbl_thresh
+
+	global		_denorm
+_denorm:
+#
+# Load the exponent threshold for the precision selected and check
+# to see if (threshold - exponent) is > 65 in which case we can
+# simply calculate the sticky bit and zero the mantissa. otherwise
+# we have to call the denormalization routine.
+#
+	lsr.b		&0x2, %d0		# shift prec to lo bits
+	mov.w		(tbl_thresh.b,%pc,%d0.w*2), %d1 # load prec threshold
+	mov.w		%d1, %d0		# copy d1 into d0
+	sub.w		FTEMP_EX(%a0), %d0	# diff = threshold - exp
+	cmpi.w		%d0, &66		# is diff > 65? (mant + g,r bits)
+	bpl.b		denorm_set_stky		# yes; just calc sticky
+
+	clr.l		%d0			# clear g,r,s
+	btst		&inex2_bit, FPSR_EXCEPT(%a6) # yes; was INEX2 set?
+	beq.b		denorm_call		# no; don't change anything
+	bset		&29, %d0		# yes; set sticky bit
+
+denorm_call:
+	bsr.l		dnrm_lp			# denormalize the number
+	rts
+
+#
+# all bits would have been shifted off during the denorm so simply
+# calculate if the sticky should be set and clear the entire mantissa.
+#
+denorm_set_stky:
+	mov.l		&0x20000000, %d0	# set sticky bit in return value
+	mov.w		%d1, FTEMP_EX(%a0)	# load exp with threshold
+	clr.l		FTEMP_HI(%a0)		# set d1 = 0 (ms mantissa)
+	clr.l		FTEMP_LO(%a0)		# set d2 = 0 (ls mantissa)
+	rts
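+
+# illustrative example (not part of the original code): with single
+# rounding precision, if (threshold - exponent) came out as, say, 70,
+# the entire 64-bit mantissa plus the guard and round bits would be
+# shifted off. the fast path above then just stores the threshold as
+# the exponent, clears the mantissa, and returns d0 = $20000000
+# (sticky bit only).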
+
+#									#
+# dnrm_lp(): normalize exponent/mantissa to specified threshold		#
+#									#
+# INPUT:								#
+#	%a0	   : points to the operand to be denormalized		#
+#	%d0{31:29} : initial guard,round,sticky				#
+#	%d1{15:0}  : denormalization threshold				#
+# OUTPUT:								#
+#	%a0	   : points to the denormalized operand			#
+#	%d0{31:29} : final guard,round,sticky				#
+#									#
+
+# *** Local Equates *** #
+set	GRS,		L_SCR2			# g,r,s temp storage
+set	FTEMP_LO2,	L_SCR1			# FTEMP_LO copy
+
+	global		dnrm_lp
+dnrm_lp:
+
+#
+# make a copy of FTEMP_LO and place the g,r,s bits directly after it
+# in memory so as to make the bitfield extraction for denormalization easier.
+#
+	mov.l		FTEMP_LO(%a0), FTEMP_LO2(%a6) # make FTEMP_LO copy
+	mov.l		%d0, GRS(%a6)		# place g,r,s after it
+
+#
+# check to see how much less than the underflow threshold the operand
+# exponent is.
+#
+	mov.l		%d1, %d0		# copy the denorm threshold
+	sub.w		FTEMP_EX(%a0), %d1	# d1 = threshold - uns exponent
+	ble.b		dnrm_no_lp		# d1 <= 0
+	cmpi.w		%d1, &0x20		# is ( 0 <= d1 < 32) ?
+	blt.b		case_1			# yes
+	cmpi.w		%d1, &0x40		# is (32 <= d1 < 64) ?
+	blt.b		case_2			# yes
+	bra.w		case_3			# (d1 >= 64)
+
+#
+# No normalization necessary
+#
+dnrm_no_lp:
+	mov.l		GRS(%a6), %d0		# restore original g,r,s
+	rts
+
+#
+# case (0<d1<32)
+#
+# %d0 = denorm threshold
+# %d1 = "n" = amt to shift
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
+#	\	   \		      \			 \
+#	 \	    \		       \		  \
+#	  \	     \			\		   \
+#	   \	      \			 \		    \
+#	    \	       \		  \		     \
+#	     \		\		   \		      \
+#	      \		 \		    \		       \
+#	       \	  \		     \			\
+#	<-(n)-><-(32 - n)-><------(32)-------><------(32)------->
+#	---------------------------------------------------------
+#	|0.....0| NEW_HI  |  NEW_FTEMP_LO     |grs		|
+#	---------------------------------------------------------
+#
+case_1:
+	mov.l		%d2, -(%sp)		# create temp storage
+
+	mov.w		%d0, FTEMP_EX(%a0)	# exponent = denorm threshold
+	mov.l		&32, %d0
+	sub.w		%d1, %d0		# %d0 = 32 - %d1
+
+	cmpi.w		%d1, &29		# is shft amt >= 29
+	blt.b		case1_extract		# no; no fix needed
+	mov.b		GRS(%a6), %d2
+	or.b		%d2, 3+FTEMP_LO2(%a6)
+
+case1_extract:
+	bfextu		FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_HI
+	bfextu		FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new FTEMP_LO
+	bfextu		FTEMP_LO2(%a6){%d0:&32}, %d0 # %d0 = new G,R,S
+
+	mov.l		%d2, FTEMP_HI(%a0)	# store new FTEMP_HI
+	mov.l		%d1, FTEMP_LO(%a0)	# store new FTEMP_LO
+
+	bftst		%d0{&2:&30}		# were bits shifted off?
+	beq.b		case1_sticky_clear	# no; go finish
+	bset		&rnd_stky_bit, %d0	# yes; set sticky bit
+
+case1_sticky_clear:
+	and.l		&0xe0000000, %d0	# clear all but G,R,S
+	mov.l		(%sp)+, %d2		# restore temp register
+	rts
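+
+# illustrative walk-through of case_1 (not part of the original code):
+# assume n = 8, FTEMP_HI = $80000000, FTEMP_LO = $000000ff, grs = 0.
+# with %d0 = 32 - n = 24, the three bfextu's should produce:
+#	new FTEMP_HI = $00800000 (old hi shifted right by 8)
+#	new FTEMP_LO = $00000000 (hi{7:0} ++ lo{31:8})
+#	new g,r,s    = $ff000000 (lo{7:0} ++ old grs{31:8})
+# bits below g,r are set, so sticky is set and $e0000000 is returned.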
+
+#
+# case (32<=d1<64)
+#
+# %d0 = denorm threshold
+# %d1 = "n" = amt to shift
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
+#	\	   \		      \
+#	 \	    \		       \
+#	  \	     \			-------------------
+#	   \	      --------------------		   \
+#	    -------------------		  \		    \
+#			       \	   \		     \
+#				\	    \		      \
+#				 \	     \		       \
+#	<-------(32)------><-(n)-><-(32 - n)-><------(32)------->
+#	---------------------------------------------------------
+#	|0...............0|0....0| NEW_LO     |grs		|
+#	---------------------------------------------------------
+#
+case_2:
+	mov.l		%d2, -(%sp)		# create temp storage
+
+	mov.w		%d0, FTEMP_EX(%a0)	# exponent = denorm threshold
+	subi.w		&0x20, %d1		# %d1 now between 0 and 32
+	mov.l		&0x20, %d0
+	sub.w		%d1, %d0		# %d0 = 32 - %d1
+
+# subtle step here; or in the g,r,s at the bottom of FTEMP_LO to minimize
+# the number of bits to check for the sticky detect.
+# it only plays a role in shift amounts of 61-63.
+	mov.b		GRS(%a6), %d2
+	or.b		%d2, 3+FTEMP_LO2(%a6)
+
+	bfextu		FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_LO
+	bfextu		FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new G,R,S
+
+	bftst		%d1{&2:&30}		# were any bits shifted off?
+	bne.b		case2_set_sticky	# yes; set sticky bit
+	bftst		FTEMP_LO2(%a6){%d0:&31}	# were any bits shifted off?
+	bne.b		case2_set_sticky	# yes; set sticky bit
+
+	mov.l		%d1, %d0		# move new G,R,S to %d0
+	bra.b		case2_end
+
+case2_set_sticky:
+	mov.l		%d1, %d0		# move new G,R,S to %d0
+	bset		&rnd_stky_bit, %d0	# set sticky bit
+
+case2_end:
+	clr.l		FTEMP_HI(%a0)		# store FTEMP_HI = 0
+	mov.l		%d2, FTEMP_LO(%a0)	# store FTEMP_LO
+	and.l		&0xe0000000, %d0	# clear all but G,R,S
+
+	mov.l		(%sp)+,%d2		# restore temp register
+	rts
+
+#
+# case (d1>=64)
+#
+# %d0 = denorm threshold
+# %d1 = amt to shift
+#
+case_3:
+	mov.w		%d0, FTEMP_EX(%a0)	# insert denorm threshold
+
+	cmpi.w		%d1, &65		# is shift amt > 65?
+	blt.b		case3_64		# no; it's == 64
+	beq.b		case3_65		# no; it's == 65
+
+#
+# case (d1>65)
+#
+# Shift value is > 65 and out of range. All bits are shifted off.
+# Return a zero mantissa with the sticky bit set
+#
+	clr.l		FTEMP_HI(%a0)		# clear hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	mov.l		&0x20000000, %d0	# set sticky bit
+	rts
+
+#
+# case (d1 == 64)
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-------(32)------>
+#	\		   \
+#	 \		    \
+#	  \		     \
+#	   \		      ------------------------------
+#	    -------------------------------		    \
+#					   \		     \
+#					    \		      \
+#					     \		       \
+#					      <-------(32)------>
+#	---------------------------------------------------------
+#	|0...............0|0................0|grs		|
+#	---------------------------------------------------------
+#
+case3_64:
+	mov.l		FTEMP_HI(%a0), %d0	# fetch hi(mantissa)
+	mov.l		%d0, %d1		# make a copy
+	and.l		&0xc0000000, %d0	# extract G,R
+	and.l		&0x3fffffff, %d1	# extract other bits
+
+	bra.b		case3_complete
+
+#
+# case (d1 == 65)
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-------(32)------>
+#	\		   \
+#	 \		    \
+#	  \		     \
+#	   \		      ------------------------------
+#	    --------------------------------		    \
+#					    \		     \
+#					     \		      \
+#					      \		       \
+#					       <-------(31)----->
+#	---------------------------------------------------------
+#	|0...............0|0................0|0rs		|
+#	---------------------------------------------------------
+#
+case3_65:
+	mov.l		FTEMP_HI(%a0), %d0	# fetch hi(mantissa)
+	mov.l		%d0, %d1		# make a copy
+	and.l		&0x80000000, %d0	# extract R bit
+	lsr.l		&0x1, %d0		# shift high bit into R bit
+	and.l		&0x7fffffff, %d1	# extract other bits
+
+case3_complete:
+# the last operation done was an "and" of the bits shifted off, so the
+# condition codes are already set; branch accordingly.
+	bne.b		case3_set_sticky	# yes; go set new sticky
+	tst.l		FTEMP_LO(%a0)		# were any bits shifted off?
+	bne.b		case3_set_sticky	# yes; go set new sticky
+	tst.b		GRS(%a6)		# were any bits shifted off?
+	bne.b		case3_set_sticky	# yes; go set new sticky
+
+#
+# no bits were shifted off so don't set the sticky bit.
+# the guard and round bits already sit in %d0 and
+# the entire mantissa is zero.
+#
+	clr.l		FTEMP_HI(%a0)		# clear hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	rts
+
+#
+# some bits were shifted off so set the sticky bit.
+# the entire mantissa is zero.
+#
+case3_set_sticky:
+	bset		&rnd_stky_bit,%d0	# set new sticky bit
+	clr.l		FTEMP_HI(%a0)		# clear hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_round(): round result according to precision/mode		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0	  = ptr to input operand in internal extended format	#
+#	d1(hi)    = contains rounding precision:			#
+#			ext = $0000xxxx					#
+#			sgl = $0004xxxx					#
+#			dbl = $0008xxxx					#
+#	d1(lo)	  = contains rounding mode:				#
+#			RN  = $xxxx0000					#
+#			RZ  = $xxxx0001					#
+#			RM  = $xxxx0002					#
+#			RP  = $xxxx0003					#
+#	d0{31:29} = contains the g,r,s bits (extended)			#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = pointer to rounded result					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	On return the value pointed to by a0 is correctly rounded,	#
+#	a0 is preserved and the g-r-s bits in d0 are cleared.		#
+#	The result is not typed - the tag field is invalid.  The	#
+#	result is still in the internal extended format.		#
+#									#
+#	The INEX bit of USER_FPSR will be set if the rounded result was	#
+#	inexact (i.e. if any of the g-r-s bits were set).		#
+#									#
+#########################################################################
+
+	global		_round
+_round:
+#
+# ext_grs() looks at the rounding precision and sets the appropriate
+# G,R,S bits.
+# If (G,R,S == 0) then result is exact and round is done, else set
+# the inex flag in status reg and continue.
+#
+	bsr.l		ext_grs			# extract G,R,S
+
+	tst.l		%d0			# are G,R,S zero?
+	beq.w		truncate		# yes; round is complete
+
+	or.w		&inx2a_mask, 2+USER_FPSR(%a6) # set inex2/ainex
+
+#
+# Use rounding mode as an index into a jump table for these modes.
+# All of the following assumes grs != 0.
+#
+	mov.w		(tbl_mode.b,%pc,%d1.w*2), %a1 # load jump offset
+	jmp		(tbl_mode.b,%pc,%a1)	# jmp to rnd mode handler
+
+tbl_mode:
+	short		rnd_near - tbl_mode
+	short		truncate - tbl_mode	# RZ always truncates
+	short		rnd_mnus - tbl_mode
+	short		rnd_plus - tbl_mode
+
+#################################################################
+#	ROUND PLUS INFINITY					#
+#								#
+#	If sign of fp number = 0 (positive), then add 1 to l.	#
+#################################################################
+rnd_plus:
+	tst.b		FTEMP_SGN(%a0)		# check for sign
+	bmi.w		truncate		# if negative then truncate
+
+	mov.l		&0xffffffff, %d0	# force g,r,s to be all f's
+	swap		%d1			# set up d1 for round prec.
+
+	cmpi.b		%d1, &s_mode		# is prec = sgl?
+	beq.w		add_sgl			# yes
+	bgt.w		add_dbl			# no; it's dbl
+	bra.w		add_ext			# no; it's ext
+
+#################################################################
+#	ROUND MINUS INFINITY					#
+#								#
+#	If sign of fp number = 1 (negative), then add 1 to l.	#
+#################################################################
+rnd_mnus:
+	tst.b		FTEMP_SGN(%a0)		# check for sign
+	bpl.w		truncate		# if positive then truncate
+
+	mov.l		&0xffffffff, %d0	# force g,r,s to be all f's
+	swap		%d1			# set up d1 for round prec.
+
+	cmpi.b		%d1, &s_mode		# is prec = sgl?
+	beq.w		add_sgl			# yes
+	bgt.w		add_dbl			# no; it's dbl
+	bra.w		add_ext			# no; it's ext
+
+#################################################################
+#	ROUND NEAREST						#
+#								#
+#	If (g=1), then add 1 to l and if (r=s=0), then clear l	#
+#	Note that this will round to even in case of a tie.	#
+#################################################################
+rnd_near:
+	asl.l		&0x1, %d0		# shift g-bit to c-bit
+	bcc.w		truncate		# if (g=0) then truncate
+
+	swap		%d1			# set up d1 for round prec.
+
+	cmpi.b		%d1, &s_mode		# is prec = sgl?
+	beq.w		add_sgl			# yes
+	bgt.w		add_dbl			# no; it's dbl
+	bra.w		add_ext			# no; it's ext
+
+# *** LOCAL EQUATES ***
+set	ad_1_sgl,	0x00000100	# constant to add 1 to l-bit in sgl prec
+set	ad_1_dbl,	0x00000800	# constant to add 1 to l-bit in dbl prec
+
+#########################
+#	ADD SINGLE	#
+#########################
+add_sgl:
+	add.l		&ad_1_sgl, FTEMP_HI(%a0)
+	bcc.b		scc_clr			# no mantissa overflow
+	roxr.w		FTEMP_HI(%a0)		# shift v-bit back in
+	roxr.w		FTEMP_HI+2(%a0)		# shift v-bit back in
+	add.w		&0x1, FTEMP_EX(%a0)	# and incr exponent
+scc_clr:
+	tst.l		%d0			# test for rs = 0
+	bne.b		sgl_done
+	and.w		&0xfe00, FTEMP_HI+2(%a0) # clear the l-bit
+sgl_done:
+	and.l		&0xffffff00, FTEMP_HI(%a0) # truncate bits beyond sgl limit
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	rts
+
+#########################
+#	ADD EXTENDED	#
+#########################
+add_ext:
+	addq.l		&1,FTEMP_LO(%a0)	# add 1 to l-bit
+	bcc.b		xcc_clr			# test for carry out
+	addq.l		&1,FTEMP_HI(%a0)	# propagate carry
+	bcc.b		xcc_clr
+	roxr.w		FTEMP_HI(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_HI+2(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_LO(%a0)
+	roxr.w		FTEMP_LO+2(%a0)
+	add.w		&0x1,FTEMP_EX(%a0)	# and inc exp
+xcc_clr:
+	tst.l		%d0			# test rs = 0
+	bne.b		add_ext_done
+	and.b		&0xfe,FTEMP_LO+3(%a0)	# clear the l bit
+add_ext_done:
+	rts
+
+#########################
+#	ADD DOUBLE	#
+#########################
+add_dbl:
+	add.l		&ad_1_dbl, FTEMP_LO(%a0) # add 1 to lsb
+	bcc.b		dcc_clr			# no carry
+	addq.l		&0x1, FTEMP_HI(%a0)	# propagate carry
+	bcc.b		dcc_clr			# no carry
+
+	roxr.w		FTEMP_HI(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_HI+2(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_LO(%a0)
+	roxr.w		FTEMP_LO+2(%a0)
+	addq.w		&0x1, FTEMP_EX(%a0)	# incr exponent
+dcc_clr:
+	tst.l		%d0			# test for rs = 0
+	bne.b		dbl_done
+	and.w		&0xf000, FTEMP_LO+2(%a0) # clear the l-bit
+
+dbl_done:
+	and.l		&0xfffff800,FTEMP_LO(%a0) # truncate bits beyond dbl limit
+	rts
+
+###########################
+# Truncate all other bits #
+###########################
+truncate:
+	swap		%d1			# select rnd prec
+
+	cmpi.b		%d1, &s_mode		# is prec sgl?
+	beq.w		sgl_done		# yes
+	bgt.b		dbl_done		# no; it's dbl
+	rts					# no; it's ext
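+
+# illustrative example of round-to-nearest-even (not part of the
+# original code): in extended precision with g = 1 and r = s = 0 (an
+# exact tie, d0 = $80000000 on entry to rnd_near), the asl.l moves g
+# into the carry, so add_ext adds 1 to the l-bit; since d0 is now zero
+# (r = s = 0), the "and.b &0xfe" clears the l-bit again, leaving the
+# nearest *even* mantissa as IEEE round-to-nearest requires.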
+
+
+#
+# ext_grs(): extract guard, round and sticky bits according to
+#	     rounding precision.
+#
+# INPUT
+#	d0	   = extended precision g,r,s (in d0{31:29})
+#	d1	   = {PREC,ROUND}
+# OUTPUT
+#	d0{31:29}  = guard, round, sticky
+#
+# The ext_grs routine extracts the guard/round/sticky bits according to the
+# selected rounding precision. It is called by the round subroutine
+# only.  All registers except d0 are kept intact. d0 becomes an
+# updated guard,round,sticky in d0{31:29}
+#
+# Notes: the ext_grs uses the round PREC, and therefore has to swap d1
+#	 prior to usage, and needs to restore d1 to original. this
+#	 routine is tightly tied to the round routine and not meant to
+#	 uphold standard subroutine calling practices.
+#
+
+ext_grs:
+	swap		%d1			# have d1.w point to round precision
+	tst.b		%d1			# is rnd prec = extended?
+	bne.b		ext_grs_not_ext		# no; go handle sgl or dbl
+
+#
+# %d0 actually already holds g,r,s since _round() had it before calling
+# this function. so, as long as we don't disturb it, we are "returning" it.
+#
+ext_grs_ext:
+	swap		%d1			# yes; return to correct positions
+	rts
+
+ext_grs_not_ext:
+	movm.l		&0x3000, -(%sp)		# make some temp registers {d2/d3}
+
+	cmpi.b		%d1, &s_mode		# is rnd prec = sgl?
+	bne.b		ext_grs_dbl		# no; go handle dbl
+
+#
+# sgl:
+#	96		64	  40	32		0
+#	-----------------------------------------------------
+#	| EXP	|XXXXXXX|	  |xx	|		|grs|
+#	-----------------------------------------------------
+#			<--(24)--->nn\			   /
+#				   ee ---------------------
+#				   ww		|
+#						v
+#				   gr	   new sticky
+#
+ext_grs_sgl:
+	bfextu		FTEMP_HI(%a0){&24:&2}, %d3 # sgl prec. g-r are 2 bits right
+	mov.l		&30, %d2		# of the sgl prec. limits
+	lsl.l		%d2, %d3		# shift g-r bits to MSB of d3
+	mov.l		FTEMP_HI(%a0), %d2	# get word 2 for s-bit test
+	and.l		&0x0000003f, %d2	# s bit is the or of all other
+	bne.b		ext_grs_st_stky		# bits to the right of g-r
+	tst.l		FTEMP_LO(%a0)		# test lower mantissa
+	bne.b		ext_grs_st_stky		# if any are set, set sticky
+	tst.l		%d0			# test original g,r,s
+	bne.b		ext_grs_st_stky		# if any are set, set sticky
+	bra.b		ext_grs_end_sd		# if words 3 and 4 are clr, exit
+
+#
+# dbl:
+#	96		64		32	 11	0
+#	-----------------------------------------------------
+#	| EXP	|XXXXXXX|		|	 |xx	|grs|
+#	-----------------------------------------------------
+#						  nn\	    /
+#						  ee -------
+#						  ww	|
+#							v
+#						  gr	new sticky
+#
+ext_grs_dbl:
+	bfextu		FTEMP_LO(%a0){&21:&2}, %d3 # dbl-prec. g-r are 2 bits right
+	mov.l		&30, %d2		# of the dbl prec. limits
+	lsl.l		%d2, %d3		# shift g-r bits to the MSB of d3
+	mov.l		FTEMP_LO(%a0), %d2	# get lower mantissa  for s-bit test
+	and.l		&0x000001ff, %d2	# s bit is the or-ing of all
+	bne.b		ext_grs_st_stky		# other bits to the right of g-r
+	tst.l		%d0			# test word original g,r,s
+	bne.b		ext_grs_st_stky		# if any are set, set sticky
+	bra.b		ext_grs_end_sd		# if clear, exit
+
+ext_grs_st_stky:
+	bset		&rnd_stky_bit, %d3	# set sticky bit
+ext_grs_end_sd:
+	mov.l		%d3, %d0		# return grs to d0
+
+	movm.l		(%sp)+, &0xc		# restore scratch registers {d2/d3}
+
+	swap		%d1			# restore d1 to original
+	rts
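+
+# illustrative example for the sgl case (not part of the original code):
+# with FTEMP_HI = $800000c0, FTEMP_LO = 0 and d0 = 0 on entry, the
+# bfextu pulls bits {24:2} = %11, which the lsl.l positions as
+# g = 1, r = 1 ($c0000000). nothing remains below them (hi & $3f,
+# FTEMP_LO, and the old g,r,s are all clear), so sticky stays zero and
+# d0 = $c0000000 is returned.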
+
+#########################################################################
+# norm(): normalize the mantissa of an extended precision input. the	#
+#	  input operand should not be normalized already.		#
+#									#
+# XDEF ****************************************************************	#
+#	norm()								#
+#									#
+# XREF **************************************************************** #
+#	none								#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer fp extended precision operand to normalize		#
+#									#
+# OUTPUT ************************************************************** #
+#	d0 = number of bit positions the mantissa was shifted		#
+#	a0 = the input operand's mantissa is normalized; the exponent	#
+#	     is unchanged.						#
+#									#
+#########################################################################
+	global		norm
+norm:
+	mov.l		%d2, -(%sp)		# create some temp regs
+	mov.l		%d3, -(%sp)
+
+	mov.l		FTEMP_HI(%a0), %d0	# load hi(mantissa)
+	mov.l		FTEMP_LO(%a0), %d1	# load lo(mantissa)
+
+	bfffo		%d0{&0:&32}, %d2	# how many places to shift?
+	beq.b		norm_lo			# hi(man) is all zeroes!
+
+norm_hi:
+	lsl.l		%d2, %d0		# left shift hi(man)
+	bfextu		%d1{&0:%d2}, %d3	# extract lo bits
+
+	or.l		%d3, %d0		# create hi(man)
+	lsl.l		%d2, %d1		# create lo(man)
+
+	mov.l		%d0, FTEMP_HI(%a0)	# store new hi(man)
+	mov.l		%d1, FTEMP_LO(%a0)	# store new lo(man)
+
+	mov.l		%d2, %d0		# return shift amount
+
+	mov.l		(%sp)+, %d3		# restore temp regs
+	mov.l		(%sp)+, %d2
+
+	rts
+
+norm_lo:
+	bfffo		%d1{&0:&32}, %d2	# how many places to shift?
+	lsl.l		%d2, %d1		# shift lo(man)
+	add.l		&32, %d2		# add 32 to shft amount
+
+	mov.l		%d1, FTEMP_HI(%a0)	# store hi(man)
+	clr.l		FTEMP_LO(%a0)		# lo(man) is now zero
+
+	mov.l		%d2, %d0		# return shift amount
+
+	mov.l		(%sp)+, %d3		# restore temp regs
+	mov.l		(%sp)+, %d2
+
+	rts
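+
+# illustrative walk-through (not part of the original code): for
+# FTEMP_HI = $00000001, FTEMP_LO = $80000000, bfffo finds the first set
+# bit at offset 31, so d2 = 31. the hi lword is shifted left by 31, the
+# top 31 bits of the lo lword are OR'd in behind it, and the result is
+# FTEMP_HI = $c0000000, FTEMP_LO = $00000000 with a shift count of 31
+# returned in d0.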
+
+#########################################################################
+# unnorm_fix(): - changes an UNNORM to one of NORM, DENORM, or ZERO	#
+#		- returns corresponding optype tag			#
+#									#
+# XDEF ****************************************************************	#
+#	unnorm_fix()							#
+#									#
+# XREF **************************************************************** #
+#	norm() - normalize the mantissa					#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to unnormalized extended precision number		#
+#									#
+# OUTPUT ************************************************************** #
+#	d0 = optype tag - is corrected to one of NORM, DENORM, or ZERO	#
+#	a0 = input operand has been converted to a norm, denorm, or	#
+#	     zero; both the exponent and mantissa are changed.		#
+#									#
+#########################################################################
+
+	global		unnorm_fix
+unnorm_fix:
+	bfffo		FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
+	bne.b		unnorm_shift		# hi(man) is not all zeroes
+
+#
+# hi(man) is all zeroes so see if any bits in lo(man) are set
+#
+unnorm_chk_lo:
+	bfffo		FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
+	beq.w		unnorm_zero		# yes
+
+	add.w		&32, %d0		# no; fix shift distance
+
+#
+# d0 = # shifts needed for complete normalization
+#
+unnorm_shift:
+	clr.l		%d1			# clear top word
+	mov.w		FTEMP_EX(%a0), %d1	# extract exponent
+	and.w		&0x7fff, %d1		# strip off sgn
+
+	cmp.w		%d0, %d1		# will denorm push exp < 0?
+	bgt.b		unnorm_nrm_zero		# yes; denorm only until exp = 0
+
+#
+# exponent would not go < 0. therefore, number stays normalized
+#
+	sub.w		%d0, %d1		# shift exponent value
+	mov.w		FTEMP_EX(%a0), %d0	# load old exponent
+	and.w		&0x8000, %d0		# save old sign
+	or.w		%d0, %d1		# {sgn,new exp}
+	mov.w		%d1, FTEMP_EX(%a0)	# insert new exponent
+
+	bsr.l		norm			# normalize UNNORM
+
+	mov.b		&NORM, %d0		# return new optype tag
+	rts
+
+#
+# exponent would go < 0, so only denormalize until exp = 0
+#
+unnorm_nrm_zero:
+	cmp.b		%d1, &32		# is exp <= 32?
+	bgt.b		unnorm_nrm_zero_lrg	# no; go handle large exponent
+
+	bfextu		FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
+	mov.l		%d0, FTEMP_HI(%a0)	# save new hi(man)
+
+	mov.l		FTEMP_LO(%a0), %d0	# fetch old lo(man)
+	lsl.l		%d1, %d0		# extract new lo(man)
+	mov.l		%d0, FTEMP_LO(%a0)	# save new lo(man)
+
+	and.w		&0x8000, FTEMP_EX(%a0)	# set exp = 0
+
+	mov.b		&DENORM, %d0		# return new optype tag
+	rts
+
+#
+# only mantissa bits set are in lo(man)
+#
+unnorm_nrm_zero_lrg:
+	sub.w		&32, %d1		# adjust shft amt by 32
+
+	mov.l		FTEMP_LO(%a0), %d0	# fetch old lo(man)
+	lsl.l		%d1, %d0		# left shift lo(man)
+
+	mov.l		%d0, FTEMP_HI(%a0)	# store new hi(man)
+	clr.l		FTEMP_LO(%a0)		# lo(man) = 0
+
+	and.w		&0x8000, FTEMP_EX(%a0)	# set exp = 0
+
+	mov.b		&DENORM, %d0		# return new optype tag
+	rts
+
+#
+# whole mantissa is zero so this UNNORM is actually a zero
+#
+unnorm_zero:
+	and.w		&0x8000, FTEMP_EX(%a0)	# force exponent to zero
+
+	mov.b		&ZERO, %d0		# fix optype tag
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	set_tag_x(): return the optype of the input ext fp number	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, UNNORM, ZERO	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#	If it's an unnormalized zero, alter the operand and force it	#
+# to be a normal zero.							#
+#									#
+#########################################################################
+
+	global		set_tag_x
+set_tag_x:
+	mov.w		FTEMP_EX(%a0), %d0	# extract exponent
+	andi.w		&0x7fff, %d0		# strip off sign
+	cmpi.w		%d0, &0x7fff		# is (EXP == MAX)?
+	beq.b		inf_or_nan_x
+not_inf_or_nan_x:
+	btst		&0x7,FTEMP_HI(%a0)
+	beq.b		not_norm_x
+is_norm_x:
+	mov.b		&NORM, %d0
+	rts
+not_norm_x:
+	tst.w		%d0			# is exponent = 0?
+	bne.b		is_unnorm_x
+not_unnorm_x:
+	tst.l		FTEMP_HI(%a0)
+	bne.b		is_denorm_x
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_denorm_x
+is_zero_x:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_x:
+	mov.b		&DENORM, %d0
+	rts
+# we must now distinguish "unnormalized zeroes", which we
+# must convert to zero.
+is_unnorm_x:
+	tst.l		FTEMP_HI(%a0)
+	bne.b		is_unnorm_reg_x
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_unnorm_reg_x
+# it's an "unnormalized zero". let's convert it to an actual zero...
+	andi.w		&0x8000,FTEMP_EX(%a0)	# clear exponent
+	mov.b		&ZERO, %d0
+	rts
+is_unnorm_reg_x:
+	mov.b		&UNNORM, %d0
+	rts
+inf_or_nan_x:
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_nan_x
+	mov.l		FTEMP_HI(%a0), %d0
+	and.l		&0x7fffffff, %d0	# msb is a don't care!
+	bne.b		is_nan_x
+is_inf_x:
+	mov.b		&INF, %d0
+	rts
+is_nan_x:
+	btst		&0x6, FTEMP_HI(%a0)
+	beq.b		is_snan_x
+	mov.b		&QNAN, %d0
+	rts
+is_snan_x:
+	mov.b		&SNAN, %d0
+	rts
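+
+# summary of the classification above (an editorial restatement of the
+# code, not additional logic):
+#	exp = $7fff, mant{62:0} = 0		-> INF
+#	exp = $7fff, mant != 0, bit 62 set	-> QNAN
+#	exp = $7fff, mant != 0, bit 62 clear	-> SNAN
+#	j-bit (bit 63) set			-> NORM
+#	exp = 0, j = 0, mant != 0		-> DENORM
+#	exp != 0, j = 0, mant != 0		-> UNNORM
+#	exp < $7fff, j = 0, mant = 0		-> ZERO (operand forced to +/-0)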
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	set_tag_d(): return the optype of the input dbl fp number	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = points to double precision operand				#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, ZERO		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#									#
+#########################################################################
+
+	global		set_tag_d
+set_tag_d:
+	mov.l		FTEMP(%a0), %d0
+	mov.l		%d0, %d1
+
+	andi.l		&0x7ff00000, %d0
+	beq.b		zero_or_denorm_d
+
+	cmpi.l		%d0, &0x7ff00000
+	beq.b		inf_or_nan_d
+
+is_norm_d:
+	mov.b		&NORM, %d0
+	rts
+zero_or_denorm_d:
+	and.l		&0x000fffff, %d1
+	bne		is_denorm_d
+	tst.l		4+FTEMP(%a0)
+	bne		is_denorm_d
+is_zero_d:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_d:
+	mov.b		&DENORM, %d0
+	rts
+inf_or_nan_d:
+	and.l		&0x000fffff, %d1
+	bne		is_nan_d
+	tst.l		4+FTEMP(%a0)
+	bne		is_nan_d
+is_inf_d:
+	mov.b		&INF, %d0
+	rts
+is_nan_d:
+	btst		&19, %d1
+	bne		is_qnan_d
+is_snan_d:
+	mov.b		&SNAN, %d0
+	rts
+is_qnan_d:
+	mov.b		&QNAN, %d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	set_tag_s(): return the optype of the input sgl fp number	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to single precision operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, ZERO		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#									#
+#########################################################################
+
+	global		set_tag_s
+set_tag_s:
+	mov.l		FTEMP(%a0), %d0
+	mov.l		%d0, %d1
+
+	andi.l		&0x7f800000, %d0
+	beq.b		zero_or_denorm_s
+
+	cmpi.l		%d0, &0x7f800000
+	beq.b		inf_or_nan_s
+
+is_norm_s:
+	mov.b		&NORM, %d0
+	rts
+zero_or_denorm_s:
+	and.l		&0x007fffff, %d1
+	bne		is_denorm_s
+is_zero_s:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_s:
+	mov.b		&DENORM, %d0
+	rts
+inf_or_nan_s:
+	and.l		&0x007fffff, %d1
+	bne		is_nan_s
+is_inf_s:
+	mov.b		&INF, %d0
+	rts
+is_nan_s:
+	btst		&22, %d1
+	bne		is_qnan_s
+is_snan_s:
+	mov.b		&SNAN, %d0
+	rts
+is_qnan_s:
+	mov.b		&QNAN, %d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	unf_res(): routine to produce default underflow result of a	#
+#		   scaled extended precision number; this is used by	#
+#		   fadd/fdiv/fmul/etc. emulation routines.		#
+#	unf_res4(): same as above but for fsglmul/fsgldiv which use	#
+#		    single round prec and extended prec mode.		#
+#									#
+# XREF ****************************************************************	#
+#	_denorm() - denormalize according to scale factor		#
+#	_round() - round denormalized number according to rnd prec	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision operand			#
+#	d0 = scale factor						#
+#	d1 = rounding precision/mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = pointer to default underflow result in extended precision	#
+#	d0.b = result FPSR_cc which caller may or may not want to save	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Convert the input operand to "internal format" which means the	#
+# exponent is extended to 16 bits and the sign is stored in the unused	#
+# portion of the extended precision operand. Denormalize the number	#
+# according to the scale factor passed in d0. Then, round the		#
+# denormalized result.							#
+#	Set the FPSR_exc bits as appropriate but return the cc bits in	#
+# d0 in case the caller doesn't want to save them (as is the case for	#
+# fmove out).								#
+#	unf_res4() for fsglmul/fsgldiv forces the denorm to extended	#
+# precision and the rounding mode to single.				#
+#									#
+#########################################################################
+	global		unf_res
+unf_res:
+	mov.l		%d1, -(%sp)		# save rnd prec,mode on stack
+
+	btst		&0x7, FTEMP_EX(%a0)	# make "internal" format
+	sne		FTEMP_SGN(%a0)
+
+	mov.w		FTEMP_EX(%a0), %d1	# extract exponent
+	and.w		&0x7fff, %d1
+	sub.w		%d0, %d1
+	mov.w		%d1, FTEMP_EX(%a0)	# insert 16 bit exponent
+
+	mov.l		%a0, -(%sp)		# save operand ptr during calls
+
+	mov.l		0x4(%sp),%d0		# pass rnd prec.
+	andi.w		&0x00c0,%d0
+	lsr.w		&0x4,%d0
+	bsr.l		_denorm			# denorm result
+
+	mov.l		(%sp),%a0
+	mov.w		0x6(%sp),%d1		# load prec:mode into %d1
+	andi.w		&0xc0,%d1		# extract rnd prec
+	lsr.w		&0x4,%d1
+	swap		%d1
+	mov.w		0x6(%sp),%d1
+	andi.w		&0x30,%d1
+	lsr.w		&0x4,%d1
+	bsr.l		_round			# round the denorm
+
+	mov.l		(%sp)+, %a0
+
+# result is now rounded properly. convert back to normal format
+	bclr		&0x7, FTEMP_EX(%a0)	# clear sgn first; may have residue
+	tst.b		FTEMP_SGN(%a0)		# is "internal result" sign set?
+	beq.b		unf_res_chkifzero	# no; result is positive
+	bset		&0x7, FTEMP_EX(%a0)	# set result sgn
+	clr.b		FTEMP_SGN(%a0)		# clear temp sign
+
+# the number may have become zero after rounding. set ccodes accordingly.
+unf_res_chkifzero:
+	clr.l		%d0
+	tst.l		FTEMP_HI(%a0)		# is value now a zero?
+	bne.b		unf_res_cont		# no
+	tst.l		FTEMP_LO(%a0)
+	bne.b		unf_res_cont		# no
+#	bset		&z_bit, FPSR_CC(%a6)	# yes; set zero ccode bit
+	bset		&z_bit, %d0		# yes; set zero ccode bit
+
+unf_res_cont:
+
+#
+# can inex1 also be set along with unfl and inex2???
+#
+# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
+#
+	btst		&inex2_bit, FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.b		unf_res_end		# no
+	bset		&aunfl_bit, FPSR_AEXCEPT(%a6) # yes; set aunfl
+
+unf_res_end:
+	add.l		&0x4, %sp		# clear stack
+	rts
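+
+# field-extraction sketch (editorial, assuming the FPCR mode byte layout
+# used above: rounding precision in bits 7:6, rounding mode in bits 5:4):
+# for d1 = $54 on entry, "andi &0xc0 / lsr &0x4" yields $4 (sgl) both for
+# _denorm and for the hi word of _round's d1, while "andi &0x30 /
+# lsr &0x4" yields $1 (RZ) for the lo word, matching the prec/mode
+# encoding documented at _round above.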
+
+# unf_res() for fsglmul() and fsgldiv().
+	global		unf_res4
+unf_res4:
+	mov.l		%d1,-(%sp)		# save rnd prec,mode on stack
+
+	btst		&0x7,FTEMP_EX(%a0)	# make "internal" format
+	sne		FTEMP_SGN(%a0)
+
+	mov.w		FTEMP_EX(%a0),%d1	# extract exponent
+	and.w		&0x7fff,%d1
+	sub.w		%d0,%d1
+	mov.w		%d1,FTEMP_EX(%a0)	# insert 16 bit exponent
+
+	mov.l		%a0,-(%sp)		# save operand ptr during calls
+
+	clr.l		%d0			# force rnd prec = ext
+	bsr.l		_denorm			# denorm result
+
+	mov.l		(%sp),%a0
+	mov.w		&s_mode,%d1		# force rnd prec = sgl
+	swap		%d1
+	mov.w		0x6(%sp),%d1		# load rnd mode
+	andi.w		&0x30,%d1		# extract rnd prec
+	lsr.w		&0x4,%d1
+	bsr.l		_round			# round the denorm
+
+	mov.l		(%sp)+,%a0
+
+# result is now rounded properly. convert back to normal format
+	bclr		&0x7,FTEMP_EX(%a0)	# clear sgn first; may have residue
+	tst.b		FTEMP_SGN(%a0)		# is "internal result" sign set?
+	beq.b		unf_res4_chkifzero	# no; result is positive
+	bset		&0x7,FTEMP_EX(%a0)	# set result sgn
+	clr.b		FTEMP_SGN(%a0)		# clear temp sign
+
+# the number may have become zero after rounding. set ccodes accordingly.
+unf_res4_chkifzero:
+	clr.l		%d0
+	tst.l		FTEMP_HI(%a0)		# is value now a zero?
+	bne.b		unf_res4_cont		# no
+	tst.l		FTEMP_LO(%a0)
+	bne.b		unf_res4_cont		# no
+#	bset		&z_bit,FPSR_CC(%a6)	# yes; set zero ccode bit
+	bset		&z_bit,%d0		# yes; set zero ccode bit
+
+unf_res4_cont:
+
+#
+# can inex1 also be set along with unfl and inex2???
+#
+# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
+#
+	btst		&inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.b		unf_res4_end		# no
+	bset		&aunfl_bit,FPSR_AEXCEPT(%a6) # yes; set aunfl
+
+unf_res4_end:
+	add.l		&0x4,%sp		# clear stack
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	ovf_res(): routine to produce the default overflow result of	#
+#		   an overflowing number.				#
+#	ovf_res2(): same as above but the rnd mode/prec are passed	#
+#		    differently.					#
+#									#
+# XREF ****************************************************************	#
+#	none								#
+#									#
+# INPUT ***************************************************************	#
+#	d1.b	= '-1' => (-); '0' => (+)				#
+#   ovf_res():								#
+#	d0	= rnd mode/prec						#
+#   ovf_res2():								#
+#	hi(d0)	= rnd prec						#
+#	lo(d0)	= rnd mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	a0	= points to extended precision result			#
+#	d0.b	= condition code bits					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The default overflow result can be determined by the sign of	#
+# the result and the rounding mode/prec in effect. These bits are	#
+# concatenated together to create an index into the default result	#
+# table. A pointer to the correct result is returned in a0. The		#
+# resulting condition codes are returned in d0 in case the caller	#
+# doesn't want FPSR_cc altered (as is the case for fmove out).		#
+#									#
+#########################################################################
+
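+# A minimal C sketch of the indexing scheme above; sign, prec, and
+# mode are invented names for the result sign bit and the FPCR
+# rounding precision/mode fields:
+#
+#	int idx = (sign << 4) | (prec << 2) | mode;	/* 0..27	*/
+#	unsigned char cc = tbl_ovfl_cc[idx];		/* ccode byte	*/
+#	/* each result entry is 4 longs = 16 bytes wide */
+#	const unsigned char *res = (unsigned char *)tbl_ovfl_result + idx*16;
+#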
+	global		ovf_res
+ovf_res:
+	andi.w		&0x10,%d1		# keep result sign
+	lsr.b		&0x4,%d0		# shift prec/mode
+	or.b		%d0,%d1			# concat the two
+	mov.w		%d1,%d0			# make a copy
+	lsl.b		&0x1,%d1		# multiply d1 by 2
+	bra.b		ovf_res_load
+
+	global		ovf_res2
+ovf_res2:
+	and.w		&0x10, %d1		# keep result sign
+	or.b		%d0, %d1		# insert rnd mode
+	swap		%d0
+	or.b		%d0, %d1		# insert rnd prec
+	mov.w		%d1, %d0		# make a copy
+	lsl.b		&0x1, %d1		# shift left by 1
+
+#
+# use the rounding mode, precision, and result sign as an index into the
+# two tables below to fetch the default result and the result ccodes.
+#
+ovf_res_load:
+	mov.b		(tbl_ovfl_cc.b,%pc,%d0.w*1), %d0 # fetch result ccodes
+	lea		(tbl_ovfl_result.b,%pc,%d1.w*8), %a0 # return result ptr
+
+	rts
+
+tbl_ovfl_cc:
+	byte		0x2, 0x0, 0x0, 0x2
+	byte		0x2, 0x0, 0x0, 0x2
+	byte		0x2, 0x0, 0x0, 0x2
+	byte		0x0, 0x0, 0x0, 0x0
+	byte		0x2+0x8, 0x8, 0x2+0x8, 0x8
+	byte		0x2+0x8, 0x8, 0x2+0x8, 0x8
+	byte		0x2+0x8, 0x8, 0x2+0x8, 0x8
+
+tbl_ovfl_result:
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+	long		0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RZ
+	long		0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RM
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+	long		0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RZ
+	long		0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RM
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+	long		0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RZ
+	long		0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RM
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+	long		0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RZ
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+	long		0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RP
+
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+	long		0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RZ
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+	long		0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RP
+
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+	long		0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RZ
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+	long		0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RP
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	get_packed(): fetch a packed operand from memory and then	#
+#		      convert it to a floating-point binary number.	#
+#									#
+# XREF ****************************************************************	#
+#	_dcalc_ea() - calculate the correct <ea>			#
+#	_dmem_read() - fetch the packed operand from memory		#
+#	facc_in_x() - the fetch failed so jump to special exit code	#
+#	decbin()    - convert packed to binary extended precision	#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If no failure on _mem_read():					#
+#	FP_SRC(a6) = packed operand now as a binary FP number		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Get the correct <ea> which is the value on the exception stack	#
+# frame w/ maybe a correction factor if the <ea> is -(an) or (an)+.	#
+# Then, fetch the operand from memory. If the fetch fails, exit		#
+# through facc_in_x().							#
+#	If the packed operand is a ZERO, NAN, or INF, convert it to	#
+# its binary representation here. Else, call decbin() which will	#
+# convert the packed value to an extended precision binary value.	#
+#									#
+#########################################################################
+
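+# A minimal C sketch of the special-case tests below, assuming the
+# three lwords of the packed operand are in w0/w1/w2 (invented names):
+#
+#	int inf_or_nan = ((w0 >> 16) & 0x7fff) == 0x7fff; /* exp all ones */
+#	int is_zero    = !(w0 & 0xf) && !w1 && !w2;	  /* all digits 0 */
+#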
+# the stacked <ea> for packed is correct except for -(An).
+# the base reg must be updated for both -(An) and (An)+.
+	global		get_packed
+get_packed:
+	mov.l		&0xc,%d0		# packed is 12 bytes
+	bsr.l		_dcalc_ea		# fetch <ea>; correct An
+
+	lea		FP_SRC(%a6),%a1		# pass: ptr to super dst
+	mov.l		&0xc,%d0		# pass: 12 bytes
+	bsr.l		_dmem_read		# read packed operand
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_x		# yes
+
+# The packed operand is an INF or a NAN if the exponent field is all ones.
+	bfextu		FP_SRC(%a6){&1:&15},%d0	# get exp
+	cmpi.w		%d0,&0x7fff		# INF or NAN?
+	bne.b		gp_try_zero		# no
+	rts					# operand is an INF or NAN
+
+# The packed operand is a zero if the mantissa is all zero, else it's
+# a normal packed op.
+gp_try_zero:
+	mov.b		3+FP_SRC(%a6),%d0	# get byte 4
+	andi.b		&0x0f,%d0		# clear all but last nybble
+	bne.b		gp_not_spec		# not a zero
+	tst.l		FP_SRC_HI(%a6)		# is lw 2 zero?
+	bne.b		gp_not_spec		# not a zero
+	tst.l		FP_SRC_LO(%a6)		# is lw 3 zero?
+	bne.b		gp_not_spec		# not a zero
+	rts					# operand is a ZERO
+gp_not_spec:
+	lea		FP_SRC(%a6),%a0		# pass: ptr to packed op
+	bsr.l		decbin			# convert to extended
+	fmovm.x		&0x80,FP_SRC(%a6)	# make this the srcop
+	rts
+
+#########################################################################
+# decbin(): Converts normalized packed bcd value pointed to by register	#
+#	    a0 to extended-precision value in fp0.			#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to normalized packed bcd value			#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = exact fp representation of the packed bcd value.		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Expected is a normal bcd (i.e. non-exceptional; all inf, zero,	#
+#	and NaN operands are dispatched without entering this routine)	#
+#	value in 68881/882 format at location (a0).			#
+#									#
+#	A1. Convert the bcd exponent to binary by successive adds and	#
+#	muls. Set the sign according to SE. Subtract 16 to compensate	#
+#	for the mantissa which is to be interpreted as 17 integer	#
+#	digits, rather than 1 integer and 16 fraction digits.		#
+#	Note: this operation can never overflow.			#
+#									#
+#	A2. Convert the bcd mantissa to binary by successive		#
+#	adds and muls in FP0. Set the sign according to SM.		#
+#	The mantissa digits will be converted with the decimal point	#
+#	assumed following the least-significant digit.			#
+#	Note: this operation can never overflow.			#
+#									#
+#	A3. Count the number of leading/trailing zeros in the		#
+#	bcd string.  If SE is positive, count the leading zeros;	#
+#	if negative, count the trailing zeros.  Set the adjusted	#
+#	exponent equal to the exponent from A1 and the zero count	#
+#	added if SM = 1 and subtracted if SM = 0.  Scale the		#
+#	mantissa the equivalent of forcing in the bcd value:		#
+#									#
+#	SM = 0	a non-zero digit in the integer position		#
+#	SM = 1	a non-zero digit in Mant0, lsd of the fraction		#
+#									#
+#	this will insure that any value, regardless of its		#
+#	representation (ex. 0.1E2, 1E1, 10E0, 100E-1), is converted	#
+#	consistently.							#
+#									#
+#	A4. Calculate the factor 10^exp in FP1 using a table of		#
+#	10^(2^n) values.  To reduce the error in forming factors	#
+#	greater than 10^27, a directed rounding scheme is used with	#
+#	tables rounded to RN, RM, and RP, according to the table	#
+#	in the comments of the pwrten section.				#
+#									#
+#	A5. Form the final binary number by scaling the mantissa by	#
+#	the exponent factor.  This is done by multiplying the		#
+#	mantissa in FP0 by the factor in FP1 if the adjusted		#
+#	exponent sign is positive, and dividing FP0 by FP1 if		#
+#	it is negative.							#
+#									#
+#	Clean up and return. Check if the final mul or div was inexact.	#
+#	If so, set INEX1 in USER_FPSR.					#
+#									#
+#########################################################################
+
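+# A minimal C sketch of the successive multiply-and-add used in A1/A2
+# above; digit[] and n are invented names, one bcd nibble per entry:
+#
+#	long val = 0;
+#	for (int i = 0; i < n; i++)
+#		val = val * 10 + digit[i];
+#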
+#
+#	PTENRN, PTENRM, and PTENRP are arrays of powers of 10 rounded
+#	to nearest, minus, and plus, respectively.  The tables include
+#	10**{1,2,4,8,16,32,64,128,256,512,1024,2048,4096}.  No rounding
+#	is required until the power is greater than 27, however, all
+#	tables include the first 5 for ease of indexing.
+#
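+# A minimal C sketch of forming 10^exp from the 10^(2^n) entries; the
+# pten[] name is invented (PTENRN/RM/RP below hold the real values):
+#
+#	double p = 1.0;
+#	for (int n = 0; exp != 0; exp >>= 1, n++)
+#		if (exp & 1)
+#			p *= pten[n];		/* pten[n] = 10^(2^n) */
+#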
+RTABLE:
+	byte		0,0,0,0
+	byte		2,3,2,3
+	byte		2,3,3,2
+	byte		3,2,2,3
+
+	set		FNIBS,7
+	set		FSTRT,0
+
+	set		ESTRT,4
+	set		EDIGITS,2
+
+	global		decbin
+decbin:
+	mov.l		0x0(%a0),FP_SCR0_EX(%a6) # make a copy of input
+	mov.l		0x4(%a0),FP_SCR0_HI(%a6) # so we don't alter it
+	mov.l		0x8(%a0),FP_SCR0_LO(%a6)
+
+	lea		FP_SCR0(%a6),%a0
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+	fmovm.x		&0x1,-(%sp)		# save fp1
+#
+# Calculate exponent:
+#  1. Copy bcd value in memory for use as a working copy.
+#  2. Calculate absolute value of exponent in d1 by mul and add.
+#  3. Correct for exponent sign.
+#  4. Subtract 16 to compensate for interpreting the mant as all integer digits.
+#     (i.e., all digits assumed left of the decimal point.)
+#
+# Register usage:
+#
+#  calc_e:
+#	(*)  d0: temp digit storage
+#	(*)  d1: accumulator for binary exponent
+#	(*)  d2: digit count
+#	(*)  d3: offset pointer
+#	( )  d4: first word of bcd
+#	( )  a0: pointer to working bcd value
+#	( )  a6: pointer to original bcd value
+#	(*)  FP_SCR1: working copy of original bcd value
+#	(*)  L_SCR1: copy of original exponent word
+#
+calc_e:
+	mov.l		&EDIGITS,%d2		# # of exp digits (-1 for dbf)
+	mov.l		&ESTRT,%d3		# counter to pick up digits
+	mov.l		(%a0),%d4		# get first word of bcd
+	clr.l		%d1			# zero d1 for accumulator
+e_gd:
+	mulu.l		&0xa,%d1		# mul partial product by one digit place
+	bfextu		%d4{%d3:&4},%d0		# get the digit and zero extend into d0
+	add.l		%d0,%d1			# d1 = d1 + d0
+	addq.b		&4,%d3			# advance d3 to the next digit
+	dbf.w		%d2,e_gd		# if we have used all 3 digits, exit loop
+	btst		&30,%d4			# get SE
+	beq.b		e_pos			# don't negate if pos
+	neg.l		%d1			# negate before subtracting
+e_pos:
+	sub.l		&16,%d1			# sub to compensate for shift of mant
+	bge.b		e_save			# if still pos, do not neg
+	neg.l		%d1			# now negative, make pos and set SE
+	or.l		&0x40000000,%d4		# set SE in d4,
+	or.l		&0x40000000,(%a0)	# and in working bcd
+e_save:
+	mov.l		%d1,-(%sp)		# save exp on stack
+#
+#
+# Calculate mantissa:
+#  1. Calculate absolute value of mantissa in fp0 by mul and add.
+#  2. Correct for mantissa sign.
+#     (i.e., all digits assumed left of the decimal point.)
+#
+# Register usage:
+#
+#  calc_m:
+#	(*)  d0: temp digit storage
+#	(*)  d1: lword counter
+#	(*)  d2: digit count
+#	(*)  d3: offset pointer
+#	( )  d4: words 2 and 3 of bcd
+#	( )  a0: pointer to working bcd value
+#	( )  a6: pointer to original bcd value
+#	(*) fp0: mantissa accumulator
+#	( )  FP_SCR1: working copy of original bcd value
+#	( )  L_SCR1: copy of original exponent word
+#
+calc_m:
+	mov.l		&1,%d1			# word counter, init to 1
+	fmov.s		&0x00000000,%fp0	# accumulator
+#
+#
+#  Since the packed number has a long word between the first & second parts,
+#  get the integer digit then skip down & get the rest of the
+#  mantissa.  We will unroll the loop once.
+#
+	bfextu		(%a0){&28:&4},%d0	# integer part is ls digit in long word
+	fadd.b		%d0,%fp0		# add digit to sum in fp0
+#
+#
+#  Get the rest of the mantissa.
+#
+loadlw:
+	mov.l		(%a0,%d1.L*4),%d4	# load mantissa longword into d4
+	mov.l		&FSTRT,%d3		# counter to pick up digits
+	mov.l		&FNIBS,%d2		# reset number of digits per a0 ptr
+md2b:
+	fmul.s		&0x41200000,%fp0	# fp0 = fp0 * 10
+	bfextu		%d4{%d3:&4},%d0		# get the digit and zero extend
+	fadd.b		%d0,%fp0		# fp0 = fp0 + digit
+#
+#
+#  If all the digits (8) in that long word have been converted (d2=0),
+#  then inc d1 (=2) to point to the next long word and reset d3 to 0
+#  to initialize the digit offset, and set d2 to 7 for the digit count;
+#  else continue with this long word.
+#
+	addq.b		&4,%d3			# advance d3 to the next digit
+	dbf.w		%d2,md2b		# check for last digit in this lw
+nextlw:
+	addq.l		&1,%d1			# inc lw pointer in mantissa
+	cmp.l		%d1,&2			# test for last lw
+	ble.b		loadlw			# if not, get last one
+#
+#  Check the sign of the mant and make the value in fp0 the same sign.
+#
+m_sign:
+	btst		&31,(%a0)		# test sign of the mantissa
+	beq.b		ap_st_z			# if clear, go to append/strip zeros
+	fneg.x		%fp0			# if set, negate fp0
+#
+# Append/strip zeros:
+#
+#  For adjusted exponents which have an absolute value greater than 27*,
+#  this routine calculates the amount needed to normalize the mantissa
+#  for the adjusted exponent.  That number is subtracted from the exp
+#  if the exp was positive, and added if it was negative.  The purpose
+#  of this is to reduce the value of the exponent and the possibility
+#  of error in calculation of pwrten.
+#
+#  1. Branch on the sign of the adjusted exponent.
+#  2p.(positive exp)
+#   2. Check M16 and the digits in lwords 2 and 3 in descending order.
+#   3. Add one for each zero encountered until a non-zero digit.
+#   4. Subtract the count from the exp.
+#   5. Check if the exp has crossed zero in #3 above; make the exp abs
+#      and set SE.
+#   6. Multiply the mantissa by 10**count.
+#  2n.(negative exp)
+#   2. Check the digits in lwords 3 and 2 in descending order.
+#   3. Add one for each zero encountered until a non-zero digit.
+#   4. Add the count to the exp.
+#   5. Check if the exp has crossed zero in #3 above; clear SE.
+#   6. Divide the mantissa by 10**count.
+#
+#  *Why 27?  If the adjusted exponent is within -28 < expA < 28, then
+#   any adjustment due to append/strip zeros will drive the resultant
+#   exponent towards zero.  Since all pwrten constants with a power
+#   of 27 or less are exact, there is no need to use this routine to
+#   attempt to lessen the resultant exponent.
+#
+# Register usage:
+#
+#  ap_st_z:
+#	(*)  d0: temp digit storage
+#	(*)  d1: zero count
+#	(*)  d2: digit count
+#	(*)  d3: offset pointer
+#	( )  d4: first word of bcd
+#	(*)  d5: lword counter
+#	( )  a0: pointer to working bcd value
+#	( )  FP_SCR1: working copy of original bcd value
+#	( )  L_SCR1: copy of original exponent word
+#
+#
+# First check the absolute value of the exponent to see if this
+# routine is necessary.  If so, then check the sign of the exponent
+# and do append (+) or strip (-) zeros accordingly.
+# This section handles a positive adjusted exponent.
+#
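+# A minimal C sketch of the zero count for the positive-exponent case,
+# assuming the 17 bcd digits sit in digit[0..16], most significant
+# first (an invented layout):
+#
+#	int count = 0;
+#	while (count < 17 && digit[count] == 0)
+#		count++;	/* leading zeros, stripped below */
+#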
+ap_st_z:
+	mov.l		(%sp),%d1		# load expA for range test
+	cmp.l		%d1,&27			# compare expA with 27
+	ble.w		pwrten			# if abs(expA) <28, skip ap/st zeros
+	btst		&30,(%a0)		# check sign of exp
+	bne.b		ap_st_n			# if neg, go to neg side
+	clr.l		%d1			# zero count reg
+	mov.l		(%a0),%d4		# load lword 1 to d4
+	bfextu		%d4{&28:&4},%d0		# get M16 in d0
+	bne.b		ap_p_fx			# if M16 is non-zero, go fix exp
+	addq.l		&1,%d1			# inc zero count
+	mov.l		&1,%d5			# init lword counter
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 2 to d4
+	bne.b		ap_p_cl			# if lw 2 is non-zero, go scan it
+	addq.l		&8,%d1			# and inc count by 8
+	addq.l		&1,%d5			# inc lword counter
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 3 to d4
+ap_p_cl:
+	clr.l		%d3			# init offset reg
+	mov.l		&7,%d2			# init digit counter
+ap_p_gd:
+	bfextu		%d4{%d3:&4},%d0		# get digit
+	bne.b		ap_p_fx			# if non-zero, go to fix exp
+	addq.l		&4,%d3			# point to next digit
+	addq.l		&1,%d1			# inc digit counter
+	dbf.w		%d2,ap_p_gd		# get next digit
+ap_p_fx:
+	mov.l		%d1,%d0			# copy counter to d0
+	mov.l		(%sp),%d1		# get adjusted exp from memory
+	sub.l		%d0,%d1			# subtract count from exp
+	bge.b		ap_p_fm			# if still pos, go to pwrten
+	neg.l		%d1			# now it's neg; get abs
+	mov.l		(%a0),%d4		# load lword 1 to d4
+	or.l		&0x40000000,%d4		# and set SE in d4
+	or.l		&0x40000000,(%a0)	# and in memory
+#
+# Calculate the mantissa multiplier to compensate for the stripping of
+# zeros from the mantissa.
+#
+ap_p_fm:
+	lea.l		PTENRN(%pc),%a1		# get address of power-of-ten table
+	clr.l		%d3			# init table index
+	fmov.s		&0x3f800000,%fp1	# init fp1 to 1
+	mov.l		&3,%d2			# init d2 to count bits in counter
+ap_p_el:
+	asr.l		&1,%d0			# shift lsb into carry
+	bcc.b		ap_p_en			# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1		# mul by 10**(d3_bit_no)
+ap_p_en:
+	add.l		&12,%d3			# inc d3 to next rtable entry
+	tst.l		%d0			# check if d0 is zero
+	bne.b		ap_p_el			# if not, get next bit
+	fmul.x		%fp1,%fp0		# mul mantissa by 10**(no_bits_shifted)
+	bra.b		pwrten			# go calc pwrten
+#
+# This section handles a negative adjusted exponent.
+#
+ap_st_n:
+	clr.l		%d1			# clr counter
+	mov.l		&2,%d5			# set up d5 to point to lword 3
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 3
+	bne.b		ap_n_cl			# if not zero, check digits
+	sub.l		&1,%d5			# dec d5 to point to lword 2
+	addq.l		&8,%d1			# inc counter by 8
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 2
+ap_n_cl:
+	mov.l		&28,%d3			# point to last digit
+	mov.l		&7,%d2			# init digit counter
+ap_n_gd:
+	bfextu		%d4{%d3:&4},%d0		# get digit
+	bne.b		ap_n_fx			# if non-zero, go to exp fix
+	subq.l		&4,%d3			# point to previous digit
+	addq.l		&1,%d1			# inc digit counter
+	dbf.w		%d2,ap_n_gd		# get next digit
+ap_n_fx:
+	mov.l		%d1,%d0			# copy counter to d0
+	mov.l		(%sp),%d1		# get adjusted exp from memory
+	sub.l		%d0,%d1			# subtract count from exp
+	bgt.b		ap_n_fm			# if still pos, go fix mantissa
+	neg.l		%d1			# take abs of exp and clr SE
+	mov.l		(%a0),%d4		# load lword 1 to d4
+	and.l		&0xbfffffff,%d4		# and clr SE in d4
+	and.l		&0xbfffffff,(%a0)	# and in memory
+#
+# Calculate the mantissa multiplier to compensate for the appending of
+# zeros to the mantissa.
+#
+ap_n_fm:
+	lea.l		PTENRN(%pc),%a1		# get address of power-of-ten table
+	clr.l		%d3			# init table index
+	fmov.s		&0x3f800000,%fp1	# init fp1 to 1
+	mov.l		&3,%d2			# init d2 to count bits in counter
+ap_n_el:
+	asr.l		&1,%d0			# shift lsb into carry
+	bcc.b		ap_n_en			# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1		# mul by 10**(d3_bit_no)
+ap_n_en:
+	add.l		&12,%d3			# inc d3 to next rtable entry
+	tst.l		%d0			# check if d0 is zero
+	bne.b		ap_n_el			# if not, get next bit
+	fdiv.x		%fp1,%fp0		# div mantissa by 10**(no_bits_shifted)
+#
+#
+# Calculate power-of-ten factor from adjusted and shifted exponent.
+#
+# Register usage:
+#
+#  pwrten:
+#	(*)  d0: temp
+#	( )  d1: exponent
+#	(*)  d2: {FPCR[6:5],SM,SE} as index in RTABLE; temp
+#	(*)  d3: FPCR work copy
+#	( )  d4: first word of bcd
+#	(*)  a1: RTABLE pointer
+#  calc_p:
+#	(*)  d0: temp
+#	( )  d1: exponent
+#	(*)  d3: PWRTxx table index
+#	( )  a0: pointer to working copy of bcd
+#	(*)  a1: PWRTxx pointer
+#	(*) fp1: power-of-ten accumulator
+#
+# Pwrten calculates the exponent factor in the selected rounding mode
+# according to the following table:
+#
+#	Sign of Mant  Sign of Exp  Rounding Mode  PWRTEN Rounding Mode
+#
+#	ANY	  ANY	RN	RN
+#
+#	 +	   +	RP	RP
+#	 -	   +	RP	RM
+#	 +	   -	RP	RM
+#	 -	   -	RP	RP
+#
+#	 +	   +	RM	RM
+#	 -	   +	RM	RP
+#	 +	   -	RM	RP
+#	 -	   -	RM	RM
+#
+#	 +	   +	RZ	RM
+#	 -	   +	RZ	RM
+#	 +	   -	RZ	RP
+#	 -	   -	RZ	RP
+#
+#
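+# A minimal C sketch of the RTABLE lookup below; rm is FPCR[6:5] and
+# sm/se are the packed sign bits (invented names):
+#
+#	int idx = (rm << 2) | (sm << 1) | se;	/* 0..15		*/
+#	int new_rm = RTABLE[idx];		/* 0=RN, 2=RM, 3=RP	*/
+#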
+pwrten:
+	mov.l		USER_FPCR(%a6),%d3	# get user's FPCR
+	bfextu		%d3{&26:&2},%d2		# isolate rounding mode bits
+	mov.l		(%a0),%d4		# reload 1st bcd word to d4
+	asl.l		&2,%d2			# format d2 to be
+	bfextu		%d4{&0:&2},%d0		# {FPCR[6],FPCR[5],SM,SE}
+	add.l		%d0,%d2			# in d2 as index into RTABLE
+	lea.l		RTABLE(%pc),%a1		# load rtable base
+	mov.b		(%a1,%d2),%d0		# load new rounding bits from table
+	clr.l		%d3			# clear d3 to force no exc and extended
+	bfins		%d0,%d3{&26:&2}		# stuff new rounding bits in FPCR
+	fmov.l		%d3,%fpcr		# write new FPCR
+	asr.l		&1,%d0			# write correct PTENxx table
+	bcc.b		not_rp			# to a1
+	lea.l		PTENRP(%pc),%a1		# it is RP
+	bra.b		calc_p			# go to init section
+not_rp:
+	asr.l		&1,%d0			# keep checking
+	bcc.b		not_rm
+	lea.l		PTENRM(%pc),%a1		# it is RM
+	bra.b		calc_p			# go to init section
+not_rm:
+	lea.l		PTENRN(%pc),%a1		# it is RN
+calc_p:
+	mov.l		%d1,%d0			# copy exp to d0;use d0
+	bpl.b		no_neg			# if exp is negative,
+	neg.l		%d0			# invert it
+	or.l		&0x40000000,(%a0)	# and set SE bit
+no_neg:
+	clr.l		%d3			# table index
+	fmov.s		&0x3f800000,%fp1	# init fp1 to 1
+e_loop:
+	asr.l		&1,%d0			# shift next bit into carry
+	bcc.b		e_next			# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1		# mul by 10**(d3_bit_no)
+e_next:
+	add.l		&12,%d3			# inc d3 to next rtable entry
+	tst.l		%d0			# check if d0 is zero
+	bne.b		e_loop			# not zero, continue shifting
+#
+#
+#  Check the sign of the adjusted exp and make the value in fp0 the
+#  same sign. If the exp was pos then multiply fp1*fp0;
+#  else divide fp0/fp1.
+#
+# Register Usage:
+#  norm:
+#	( )  a0: pointer to working bcd value
+#	(*) fp0: mantissa accumulator
+#	( ) fp1: scaling factor - 10**(abs(exp))
+#
+pnorm:
+	btst		&30,(%a0)		# test the sign of the exponent
+	beq.b		mul			# if clear, go to multiply
+div:
+	fdiv.x		%fp1,%fp0		# exp is negative, so divide mant by 10^|exp|
+	bra.b		end_dec
+mul:
+	fmul.x		%fp1,%fp0		# exp is positive, so multiply by 10^exp
+#
+#
+# Clean up and return with result in fp0.
+#
+# If the final mul/div in decbin incurred an inex exception,
+# it will be inex2, but will be reported as inex1 by get_op.
+#
+end_dec:
+	fmov.l		%fpsr,%d0		# get status register
+	bclr		&inex2_bit+8,%d0	# test for inex2 and clear it
+	beq.b		no_exc			# skip this if no exc
+	ori.w		&inx1a_mask,2+USER_FPSR(%a6) # set INEX1/AINEX
+no_exc:
+	add.l		&0x4,%sp		# clear 1 lw param
+	fmovm.x		(%sp)+,&0x40		# restore fp1
+	movm.l		(%sp)+,&0x3c		# restore d2-d5
+	fmov.l		&0x0,%fpcr
+	fmov.l		&0x0,%fpsr
+	rts
+
+#########################################################################
+# bindec(): Converts an input in extended precision format to bcd format#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to the input extended precision value in memory.	#
+#	     the input may be either normalized, unnormalized, or	#
+#	     denormalized.						#
+#	d0 = contains the k-factor sign-extended to 32-bits.		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR0(a6) = bcd format result on the stack.			#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	A1.	Set RM and size ext;  Set SIGMA = sign of input.	#
+#		The k-factor is saved for use in d7. Clear the		#
+#		BINDEC_FLG for separating normalized/denormalized	#
+#		input.  If input is unnormalized or denormalized,	#
+#		normalize it.						#
+#									#
+#	A2.	Set X = abs(input).					#
+#									#
+#	A3.	Compute ILOG.						#
+#		ILOG is the log base 10 of the input value.  It is	#
+#		approximated by adding e + 0.f when the original	#
+#		value is viewed as 2^^e * 1.f in extended precision.	#
+#		This value is stored in d6.				#
+#									#
+#	A4.	Clr INEX bit.						#
+#		The operation in A3 above may have set INEX2.		#
+#									#
+#	A5.	Set ICTR = 0;						#
+#		ICTR is a flag used in A13.  It must be set before the	#
+#		loop entry A6.						#
+#									#
+#	A6.	Calculate LEN.						#
+#		LEN is the number of digits to be displayed.  The	#
+#		k-factor can dictate either the total number of digits,	#
+#		if it is a positive number, or the number of digits	#
+#		after the decimal point which are to be included as	#
+#		significant.  See the 68882 manual for examples.	#
+#		If LEN is computed to be greater than 17, set OPERR in	#
+#		USER_FPSR.  LEN is stored in d4.			#
+#									#
+#	A7.	Calculate SCALE.					#
+#		SCALE is equal to 10^ISCALE, where ISCALE is the number	#
+#		of decimal places needed to insure LEN integer digits	#
+#		in the output before conversion to bcd. LAMBDA is the	#
+#		sign of ISCALE, used in A9. Fp1 contains		#
+#		10^^(abs(ISCALE)) using a rounding mode which is a	#
+#		function of the original rounding mode and the signs	#
+#		of ISCALE and X.  A table is given in the code.		#
+#									#
+#	A8.	Clr INEX; Force RZ.					#
+#		The operation in A3 above may have set INEX2.		#
+#		RZ mode is forced for the scaling operation to insure	#
+#		only one rounding error.  The grs bits are collected in #
+#		the INEX flag for use in A10.				#
+#									#
+#	A9.	Scale X -> Y.						#
+#		The mantissa is scaled to the desired number of		#
+#		significant digits.  The excess digits are collected	#
+#		in INEX2.						#
+#									#
+#	A10.	Or in INEX.						#
+#		If INEX is set, round error occurred.  This is		#
+#		compensated for by 'or-ing' in the INEX2 flag to	#
+#		the lsb of Y.						#
+#									#
+#	A11.	Restore original FPCR; set size ext.			#
+#		Perform FINT operation in the user's rounding mode.	#
+#		Keep the size to extended.				#
+#									#
+#	A12.	Calculate YINT = FINT(Y) according to user's rounding	#
+#		mode.  The FPSP routine sintdo is used.  The output	#
+#		is in fp0.						#
+#									#
+#	A13.	Check for LEN digits.					#
+#		If the int operation results in more than LEN digits,	#
+#		or less than LEN -1 digits, adjust ILOG and repeat from	#
+#		A6.  This test occurs only on the first pass.  If the	#
+#		result is exactly 10^LEN, decrement ILOG and divide	#
+#		the mantissa by 10.					#
+#									#
+#	A14.	Convert the mantissa to bcd.				#
+#		The binstr routine is used to convert the LEN digit	#
+#		mantissa to bcd in memory.  The input to binstr is	#
+#		to be a fraction; i.e. (mantissa)/10^LEN and adjusted	#
+#		such that the decimal point is to the left of bit 63.	#
+#		The bcd digits are stored in the correct position in	#
+#		the final string area in memory.			#
+#									#
+#	A15.	Convert the exponent to bcd.				#
+#		As in A14 above, the exp is converted to bcd and the	#
+#		digits are stored in the final string.			#
+#		Test the length of the final exponent string.  If the	#
+#		length is 4, set operr.					#
+#									#
+#	A16.	Write sign bits to final string.			#
+#									#
+#########################################################################
+
+set	BINDEC_FLG,	EXC_TEMP	# DENORM flag
+
+# Constants in extended precision
+PLOG2:
+	long		0x3FFD0000,0x9A209A84,0xFBCFF798,0x00000000
+PLOG2UP1:
+	long		0x3FFD0000,0x9A209A84,0xFBCFF799,0x00000000
+
+# Constants in single precision
+FONE:
+	long		0x3F800000,0x00000000,0x00000000,0x00000000
+FTWO:
+	long		0x40000000,0x00000000,0x00000000,0x00000000
+FTEN:
+	long		0x41200000,0x00000000,0x00000000,0x00000000
+F4933:
+	long		0x459A2800,0x00000000,0x00000000,0x00000000
+
+RBDTBL:
+	byte		0,0,0,0
+	byte		3,3,2,2
+	byte		3,2,2,3
+	byte		2,3,3,2
+
+#	Implementation Notes:
+#
+#	The registers are used as follows:
+#
+#		d0: scratch; LEN input to binstr
+#		d1: scratch
+#		d2: upper 32-bits of mantissa for binstr
+#		d3: scratch;lower 32-bits of mantissa for binstr
+#		d4: LEN
+#		d5: LAMBDA/ICTR
+#		d6: ILOG
+#		d7: k-factor
+#		a0: ptr for original operand/final result
+#		a1: scratch pointer
+#		a2: pointer to FP_X; abs(original value) in ext
+#		fp0: scratch
+#		fp1: scratch
+#		fp2: scratch
+#		F_SCR1:
+#		F_SCR2:
+#		L_SCR1:
+#		L_SCR2:
+
+	global		bindec
+bindec:
+	movm.l		&0x3f20,-(%sp)	#  {%d2-%d7/%a2}
+	fmovm.x		&0x7,-(%sp)	#  {%fp0-%fp2}
+
+# A1. Set RM and size ext. Set SIGMA = sign input;
+#     The k-factor is saved for use in d7.  Clear BINDEC_FLG for
+#     separating  normalized/denormalized input.  If the input
+#     is a denormalized number, set the BINDEC_FLG memory word
+#     to signal denorm.  If the input is unnormalized, normalize
+#     the input and test for denormalized result.
+#
+	fmov.l		&rm_mode*0x10,%fpcr	# set RM and ext
+	mov.l		(%a0),L_SCR2(%a6)	# save exponent for sign check
+	mov.l		%d0,%d7		# move k-factor to d7
+
+	clr.b		BINDEC_FLG(%a6)	# clr norm/denorm flag
+	cmpi.b		STAG(%a6),&DENORM # is input a DENORM?
+	bne.w		A2_str		# no; input is a NORM
+
+#
+# Normalize the denorm
+#
+un_de_norm:
+	mov.w		(%a0),%d0
+	and.w		&0x7fff,%d0	# strip sign of normalized exp
+	mov.l		4(%a0),%d1
+	mov.l		8(%a0),%d2
+norm_loop:
+	sub.w		&1,%d0
+	lsl.l		&1,%d2
+	roxl.l		&1,%d1
+	tst.l		%d1
+	bge.b		norm_loop
+#
+# Test if the normalized input is denormalized
+#
+	tst.w		%d0
+	bgt.b		pos_exp		# if greater than zero, it is a norm
+	st		BINDEC_FLG(%a6)	# set flag for denorm
+pos_exp:
+	and.w		&0x7fff,%d0	# strip sign of normalized exp
+	mov.w		%d0,(%a0)
+	mov.l		%d1,4(%a0)
+	mov.l		%d2,8(%a0)
+
+# A2. Set X = abs(input).
+#
+A2_str:
+	mov.l		(%a0),FP_SCR1(%a6)	# move input to work space
+	mov.l		4(%a0),FP_SCR1+4(%a6)	# move input to work space
+	mov.l		8(%a0),FP_SCR1+8(%a6)	# move input to work space
+	and.l		&0x7fffffff,FP_SCR1(%a6)	# create abs(X)
+
+# A3. Compute ILOG.
+#     ILOG is the log base 10 of the input value.  It is approx-
+#     imated by adding e + 0.f when the original value is viewed
+#     as 2^^e * 1.f in extended precision.  This value is stored
+#     in d6.
+#
+# Register usage:
+#	Input/Output
+#	d0: k-factor/exponent
+#	d2: x/x
+#	d3: x/x
+#	d4: x/x
+#	d5: x/x
+#	d6: x/ILOG
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: x/x
+#	a2: x/x
+#	fp0: x/float(ILOG)
+#	fp1: x/x
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X)/Abs(X) with $3fff exponent
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
+
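+# A minimal C sketch of the A3 approximation, assuming x has unbiased
+# exponent e and fraction f in [0,1) (invented names):
+#
+#	/* 0.30103 ~= log10(2); PLOG2UP1 is used when e + f is negative */
+#	int ilog = (int)(((double)e + f) * 0.30102999566398);
+#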
+	tst.b		BINDEC_FLG(%a6)	# check for denorm
+	beq.b		A3_cont		# if clr, continue with norm
+	mov.l		&-4933,%d6	# force ILOG = -4933
+	bra.b		A4_str
+A3_cont:
+	mov.w		FP_SCR1(%a6),%d0	# move exp to d0
+	mov.w		&0x3fff,FP_SCR1(%a6)	# replace exponent with 0x3fff
+	fmov.x		FP_SCR1(%a6),%fp0	# now fp0 has 1.f
+	sub.w		&0x3fff,%d0	# strip off bias
+	fadd.w		%d0,%fp0	# add in exp
+	fsub.s		FONE(%pc),%fp0	# subtract off 1.0
+	fbge.w		pos_res		# if pos, branch
+	fmul.x		PLOG2UP1(%pc),%fp0	# if neg, mul by LOG2UP1
+	fmov.l		%fp0,%d6	# put ILOG in d6 as a lword
+	bra.b		A4_str		# go move out ILOG
+pos_res:
+	fmul.x		PLOG2(%pc),%fp0	# if pos, mul by LOG2
+	fmov.l		%fp0,%d6	# put ILOG in d6 as a lword
+
+
+# A4. Clr INEX bit.
+#     The operation in A3 above may have set INEX2.
+
+A4_str:
+	fmov.l		&0,%fpsr	# zero all of fpsr - nothing needed
+
+
+# A5. Set ICTR = 0;
+#     ICTR is a flag used in A13.  It must be set before the
+#     loop entry A6. The lower word of d5 is used for ICTR.
+
+	clr.w		%d5		# clear ICTR
+
+# A6. Calculate LEN.
+#     LEN is the number of digits to be displayed.  The k-factor
+#     can dictate either the total number of digits, if it is
+#     a positive number, or the number of digits after the
+#     original decimal point which are to be included as
+#     significant.  See the 68882 manual for examples.
+#     If LEN is computed to be greater than 17, set OPERR in
+#     USER_FPSR.  LEN is stored in d4.
+#
+# Register usage:
+#	Input/Output
+#	d0: exponent/Unchanged
+#	d2: x/x/scratch
+#	d3: x/x
+#	d4: exc picture/LEN
+#	d5: ICTR/Unchanged
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: x/x
+#	a2: x/x
+#	fp0: float(ILOG)/Unchanged
+#	fp1: x/x
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
+
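+# A minimal C sketch of the LEN computation in A6 (invented names):
+#
+#	int len = (k > 0) ? k : ilog + 1 - k;
+#	if (len < 1)
+#		len = 1;
+#	else if (len > 17)
+#		len = 17;	/* OPERR is also set here when k > 0 */
+#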
+A6_str:
+	tst.l		%d7		# branch on sign of k
+	ble.b		k_neg		# if k <= 0, LEN = ILOG + 1 - k
+	mov.l		%d7,%d4		# if k > 0, LEN = k
+	bra.b		len_ck		# skip to LEN check
+k_neg:
+	mov.l		%d6,%d4		# first load ILOG to d4
+	sub.l		%d7,%d4		# subtract off k
+	addq.l		&1,%d4		# add in the 1
+len_ck:
+	tst.l		%d4		# LEN check: branch on sign of LEN
+	ble.b		LEN_ng		# if neg, set LEN = 1
+	cmp.l		%d4,&17		# test if LEN > 17
+	ble.b		A7_str		# if not, forget it
+	mov.l		&17,%d4		# set max LEN = 17
+	tst.l		%d7		# if negative, never set OPERR
+	ble.b		A7_str		# if positive, continue
+	or.l		&opaop_mask,USER_FPSR(%a6)	# set OPERR & AIOP in USER_FPSR
+	bra.b		A7_str		# finished here
+LEN_ng:
+	mov.l		&1,%d4		# min LEN is 1
+
+
+# A7. Calculate SCALE.
+#     SCALE is equal to 10^ISCALE, where ISCALE is the number
+#     of decimal places needed to insure LEN integer digits
+#     in the output before conversion to bcd. LAMBDA is the sign
+#     of ISCALE, used in A9.  Fp1 contains 10^^(abs(ISCALE)) using
+#     the rounding mode as given in the following table (see
+#     Coonen, p. 7.23 as ref.; however, the SCALE variable is
+#     of opposite sign in bindec.sa from Coonen).
+#
+#	Initial					USE
+#	FPCR[6:5]	LAMBDA	SIGN(X)		FPCR[6:5]
+#	----------------------------------------------
+#	 RN	00	   0	   0		00/0	RN
+#	 RN	00	   0	   1		00/0	RN
+#	 RN	00	   1	   0		00/0	RN
+#	 RN	00	   1	   1		00/0	RN
+#	 RZ	01	   0	   0		11/3	RP
+#	 RZ	01	   0	   1		11/3	RP
+#	 RZ	01	   1	   0		10/2	RM
+#	 RZ	01	   1	   1		10/2	RM
+#	 RM	10	   0	   0		11/3	RP
+#	 RM	10	   0	   1		10/2	RM
+#	 RM	10	   1	   0		10/2	RM
+#	 RM	10	   1	   1		11/3	RP
+#	 RP	11	   0	   0		10/2	RM
+#	 RP	11	   0	   1		11/3	RP
+#	 RP	11	   1	   0		11/3	RP
+#	 RP	11	   1	   1		10/2	RM
+#
+# Register usage:
+#	Input/Output
+#	d0: exponent/scratch - final is 0
+#	d2: x/0 or 24 for A9
+#	d3: x/scratch - offset ptr into PTENRM array
+#	d4: LEN/Unchanged
+#	d5: 0/ICTR:LAMBDA
+#	d6: ILOG/ILOG or k if ((k<=0)&(ILOG<k))
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: x/ptr to PTENRM array
+#	a2: x/x
+#	fp0: float(ILOG)/Unchanged
+#	fp1: x/10^ISCALE
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
+
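+# A minimal C sketch of ISCALE and LAMBDA from A7 (invented names):
+#
+#	int iscale = ilog + 1 - len;	/* decimal shift to get LEN digits */
+#	int lambda = (iscale < 0);	/* sign of ISCALE, used in A9	   */
+#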
+A7_str:
+	tst.l		%d7		# test sign of k
+	bgt.b		k_pos		# if k > 0, skip this
+	cmp.l		%d7,%d6		# test k - ILOG
+	blt.b		k_pos		# if ILOG >= k, skip this
+	mov.l		%d7,%d6		# if ((k<0) & (ILOG < k)) ILOG = k
+k_pos:
+	mov.l		%d6,%d0		# calc ILOG + 1 - LEN in d0
+	addq.l		&1,%d0		# add the 1
+	sub.l		%d4,%d0		# sub off LEN
+	swap		%d5		# use upper word of d5 for LAMBDA
+	clr.w		%d5		# set it zero initially
+	clr.w		%d2		# set up d2 for very small case
+	tst.l		%d0		# test sign of ISCALE
+	bge.b		iscale		# if pos, skip next inst
+	addq.w		&1,%d5		# if neg, set LAMBDA true
+	cmp.l		%d0,&0xffffecd4	# test iscale <= -4908
+	bgt.b		no_inf		# if false, skip rest
+	add.l		&24,%d0		# add in 24 to iscale
+	mov.l		&24,%d2		# put 24 in d2 for A9
+no_inf:
+	neg.l		%d0		# and take abs of ISCALE
+iscale:
+	fmov.s		FONE(%pc),%fp1	# init fp1 to 1
+	bfextu		USER_FPCR(%a6){&26:&2},%d1	# get initial rmode bits
+	lsl.w		&1,%d1		# put them in bits 2:1
+	add.w		%d5,%d1		# add in LAMBDA
+	lsl.w		&1,%d1		# put them in bits 3:1
+	tst.l		L_SCR2(%a6)	# test sign of original x
+	bge.b		x_pos		# if pos, don't set bit 0
+	addq.l		&1,%d1		# if neg, set bit 0
+x_pos:
+	lea.l		RBDTBL(%pc),%a2	# load rbdtbl base
+	mov.b		(%a2,%d1),%d3	# load d3 with new rmode
+	lsl.l		&4,%d3		# put bits in proper position
+	fmov.l		%d3,%fpcr	# load bits into fpu
+	lsr.l		&4,%d3		# put bits in proper position
+	tst.b		%d3		# decode new rmode for pten table
+	bne.b		not_rn		# if zero, it is RN
+	lea.l		PTENRN(%pc),%a1	# load a1 with RN table base
+	bra.b		rmode		# exit decode
+not_rn:
+	lsr.b		&1,%d3		# get lsb in carry
+	bcc.b		not_rp2		# if carry clear, it is RM
+	lea.l		PTENRP(%pc),%a1	# load a1 with RP table base
+	bra.b		rmode		# exit decode
+not_rp2:
+	lea.l		PTENRM(%pc),%a1	# load a1 with RM table base
+rmode:
+	clr.l		%d3		# clr table index
+e_loop2:
+	lsr.l		&1,%d0		# shift next bit into carry
+	bcc.b		e_next2		# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1	# mul by 10**(d3_bit_no)
+e_next2:
+	add.l		&12,%d3		# inc d3 to next pwrten table entry
+	tst.l		%d0		# test if ISCALE is zero
+	bne.b		e_loop2		# if not, loop
+
+# A8. Clr INEX; Force RZ.
+#     The operation in A3 above may have set INEX2.
+#     RZ mode is forced for the scaling operation to insure
+#     only one rounding error.  The grs bits are collected in
+#     the INEX flag for use in A10.
+#
+# Register usage:
+#	Input/Output
+
+	fmov.l		&0,%fpsr	# clr INEX
+	fmov.l		&rz_mode*0x10,%fpcr	# set RZ rounding mode
+
+# A9. Scale X -> Y.
+#     The mantissa is scaled to the desired number of significant
+#     digits.  The excess digits are collected in INEX2. If mul,
+#     Check d2 for excess 10 exponential value.  If not zero,
+#     the iscale value would have caused the pwrten calculation
+#     to overflow.  Only a negative iscale can cause this, so
+#     multiply by 10^(d2), which is now only allowed to be 24,
+#     with a multiply by 10^8 and 10^16, which is exact since
+#     10^24 is exact.  If the input was denormalized, we must
+#     create a busy stack frame with the mul command and the
+#     two operands, and allow the fpu to complete the multiply.
+#
+# Register usage:
+#	Input/Output
+#	d0: FPCR with RZ mode/Unchanged
+#	d2: 0 or 24/unchanged
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: ptr to PTENRM array/Unchanged
+#	a2: x/x
+#	fp0: float(ILOG)/X adjusted for SCALE (Y)
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
+
+A9_str:
+	fmov.x		(%a0),%fp0	# load X from memory
+	fabs.x		%fp0		# use abs(X)
+	tst.w		%d5		# LAMBDA is in lower word of d5
+	bne.b		sc_mul		# if neg (LAMBDA = 1), scale by mul
+	fdiv.x		%fp1,%fp0	# calculate X / SCALE -> Y to fp0
+	bra.w		A10_st		# branch to A10
+
+sc_mul:
+	tst.b		BINDEC_FLG(%a6)	# check for denorm
+	beq.w		A9_norm		# if norm, continue with mul
+
+# for DENORM, we must calculate:
+#	fp0 = input_op * 10^ISCALE * 10^24
+# since the input operand is a DENORM, we can't multiply it directly.
+# so, we do the multiplication of the exponents and mantissas separately.
+# in this way, we avoid underflow on intermediate stages of the
+# multiplication and guarantee a result without exception.
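+# A minimal C sketch of the exponent bookkeeping below: the biased
+# exponents are summed with the bias re-subtracted after each add
+# (e_* are invented names; BIAS = 0x3fff):
+#
+#	int e = e_denorm + e_iscale - BIAS;
+#	e = e + e_pten8  - BIAS;	/* exponent of 10^8	*/
+#	e = e + e_pten16 - BIAS;	/* exponent of 10^16	*/
+#	/* if e went negative the result is still denormal: punt */
+#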
+	fmovm.x		&0x2,-(%sp)	# save 10^ISCALE to stack
+
+	mov.w		(%sp),%d3	# grab exponent
+	andi.w		&0x7fff,%d3	# clear sign
+	ori.w		&0x8000,(%a0)	# make DENORM exp negative
+	add.w		(%a0),%d3	# add DENORM exp to 10^ISCALE exp
+	subi.w		&0x3fff,%d3	# subtract BIAS
+	add.w		36(%a1),%d3	# add exponent of 10^8
+	subi.w		&0x3fff,%d3	# subtract BIAS
+	add.w		48(%a1),%d3	# add exponent of 10^16
+	subi.w		&0x3fff,%d3	# subtract BIAS
+
+	bmi.w		sc_mul_err	# if result is DENORM, punt!!!
+
+	andi.w		&0x8000,(%sp)	# keep sign
+	or.w		%d3,(%sp)	# insert new exponent
+	andi.w		&0x7fff,(%a0)	# clear sign bit on DENORM again
+	mov.l		0x8(%a0),-(%sp) # put input op mantissa on stk
+	mov.l		0x4(%a0),-(%sp)
+	mov.l		&0x3fff0000,-(%sp) # force exp to zero
+	fmovm.x		(%sp)+,&0x80	# load normalized DENORM into fp0
+	fmul.x		(%sp)+,%fp0
+
+#	fmul.x	36(%a1),%fp0	# multiply fp0 by 10^8
+#	fmul.x	48(%a1),%fp0	# multiply fp0 by 10^16
+	mov.l		36+8(%a1),-(%sp) # get 10^8 mantissa
+	mov.l		36+4(%a1),-(%sp)
+	mov.l		&0x3fff0000,-(%sp) # force exp to zero
+	mov.l		48+8(%a1),-(%sp) # get 10^16 mantissa
+	mov.l		48+4(%a1),-(%sp)
+	mov.l		&0x3fff0000,-(%sp)# force exp to zero
+	fmul.x		(%sp)+,%fp0	# multiply fp0 by 10^8
+	fmul.x		(%sp)+,%fp0	# multiply fp0 by 10^16
+	bra.b		A10_st
+
+sc_mul_err:
+	bra.b		sc_mul_err	# hang here; should never be reached
+
+A9_norm:
+	tst.w		%d2		# test for small exp case
+	beq.b		A9_con		# if zero, continue as normal
+	fmul.x		36(%a1),%fp0	# multiply fp0 by 10^8
+	fmul.x		48(%a1),%fp0	# multiply fp0 by 10^16
+A9_con:
+	fmul.x		%fp1,%fp0	# calculate X * SCALE -> Y to fp0
+
+# A10. Or in INEX.
+#      If INEX is set, round error occurred.  This is compensated
+#      for by 'or-ing' in the INEX2 flag to the lsb of Y.
+#
+# Register usage:
+#	Input/Output
+#	d0: FPCR with RZ mode/FPSR with INEX2 isolated
+#	d2: x/x
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: x/ptr to FP_SCR1(a6)
+#	fp0: Y/Y with lsb adjusted
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/x
+
+A10_st:
+	fmov.l		%fpsr,%d0	# get FPSR
+	fmov.x		%fp0,FP_SCR1(%a6)	# move Y to memory
+	lea.l		FP_SCR1(%a6),%a2	# load a2 with ptr to FP_SCR1
+	btst		&9,%d0		# check if INEX2 set
+	beq.b		A11_st		# if clear, skip rest
+	or.l		&1,8(%a2)	# or in 1 to lsb of mantissa
+	fmov.x		FP_SCR1(%a6),%fp0	# write adjusted Y back to fpu
+
+
+# A11. Restore original FPCR; set size ext.
+#      Perform FINT operation in the user's rounding mode.  Keep
+#      the size to extended.  The sintdo entry point in the sint
+#      routine expects the FPCR value to be in USER_FPCR for
+#      mode and precision.  The original FPCR is saved in L_SCR1.
+
+A11_st:
+	mov.l		USER_FPCR(%a6),L_SCR1(%a6)	# save it for later
+	and.l		&0x00000030,USER_FPCR(%a6)	# set size to ext,
+#					  and block exceptions
+
+
+# A12. Calculate YINT = FINT(Y) according to user's rounding mode.
+#      The FPSP routine sintdo is used.  The output is in fp0.
+#
+# Register usage:
+#	Input/Output
+#	d0: FPSR with AINEX cleared/FPCR with size set to ext
+#	d2: x/x/scratch
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/Unchanged
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/src ptr for sintdo
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	a6: temp pointer to FP_SCR1(a6) - orig value saved and restored
+#	fp0: Y/YINT
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Y adjusted for inex/Y with original exponent
+#	L_SCR1:x/original USER_FPCR
+#	L_SCR2:first word of X packed/Unchanged
+
+A12_st:
+	movm.l	&0xc0c0,-(%sp)	# save regs used by sintdo	 {%d0-%d1/%a0-%a1}
+	mov.l	L_SCR1(%a6),-(%sp)
+	mov.l	L_SCR2(%a6),-(%sp)
+
+	lea.l		FP_SCR1(%a6),%a0	# a0 is ptr to FP_SCR1(a6)
+	fmov.x		%fp0,(%a0)	# move Y to memory at FP_SCR1(a6)
+	tst.l		L_SCR2(%a6)	# test sign of original operand
+	bge.b		do_fint12		# if pos, use Y
+	or.l		&0x80000000,(%a0)	# if neg, use -Y
+do_fint12:
+	mov.l	USER_FPSR(%a6),-(%sp)
+#	bsr	sintdo		# sint routine returns int in fp0
+
+	fmov.l	USER_FPCR(%a6),%fpcr
+	fmov.l	&0x0,%fpsr			# clear the AEXC bits!!!
+##	mov.l		USER_FPCR(%a6),%d0	# ext prec/keep rnd mode
+##	andi.l		&0x00000030,%d0
+##	fmov.l		%d0,%fpcr
+	fint.x		FP_SCR1(%a6),%fp0	# do fint()
+	fmov.l	%fpsr,%d0
+	or.w	%d0,FPSR_EXCEPT(%a6)
+##	fmov.l		&0x0,%fpcr
+##	fmov.l		%fpsr,%d0		# don't keep ccodes
+##	or.w		%d0,FPSR_EXCEPT(%a6)
+
+	mov.b	(%sp),USER_FPSR(%a6)
+	add.l	&4,%sp
+
+	mov.l	(%sp)+,L_SCR2(%a6)
+	mov.l	(%sp)+,L_SCR1(%a6)
+	movm.l	(%sp)+,&0x303	# restore regs used by sint	 {%d0-%d1/%a0-%a1}
+
+	mov.l	L_SCR2(%a6),FP_SCR1(%a6)	# restore original exponent
+	mov.l	L_SCR1(%a6),USER_FPCR(%a6)	# restore user's FPCR
+
+# A13. Check for LEN digits.
+#      If the int operation results in more than LEN digits,
+#      or less than LEN -1 digits, adjust ILOG and repeat from
+#      A6.  This test occurs only on the first pass.  If the
+#      result is exactly 10^LEN, decrement ILOG and divide
+#      the mantissa by 10.  The calculation of 10^LEN cannot
+#      be inexact, since all powers of ten up to 10^27 are exact
+#      in extended precision, so the use of the power-of-ten
+#      tables below will introduce no error.
+#
+#
+# Register usage:
+#	Input/Output
+#	d0: FPCR with size set to ext/scratch final = 0
+#	d2: x/x
+#	d3: x/scratch final = x
+#	d4: LEN/LEN adjusted
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG/ILOG adjusted
+#	d7: k-factor/Unchanged
+#	a0: pointer into memory for packed bcd string formation
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: int portion of Y/abs(YINT) adjusted
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/10^LEN
+#	F_SCR1:x/x
+#	F_SCR2:Y with original exponent/Unchanged
+#	L_SCR1:original USER_FPCR/Unchanged
+#	L_SCR2:first word of X packed/Unchanged
+
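+# A minimal C sketch of the A13 adjustment below, with a hypothetical
+# pow10() standing in for the exact table-driven powers:
+#
+#	if (yint < pow10(len - 1))	{ ilog--; /* redo from A6 */ }
+#	else if (yint > pow10(len))	{ ilog++; /* redo from A6 */ }
+#	else if (yint == pow10(len))	{ yint /= 10; ilog++; }
+#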
+A13_st:
+	swap		%d5		# put ICTR in lower word of d5
+	tst.w		%d5		# check if ICTR = 0
+	bne		not_zr		# if non-zero, go to second test
+#
+# Compute 10^(LEN-1)
+#
+	fmov.s		FONE(%pc),%fp2	# init fp2 to 1.0
+	mov.l		%d4,%d0		# put LEN in d0
+	subq.l		&1,%d0		# d0 = LEN -1
+	clr.l		%d3		# clr table index
+l_loop:
+	lsr.l		&1,%d0		# shift next bit into carry
+	bcc.b		l_next		# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp2	# mul by 10**(d3_bit_no)
+l_next:
+	add.l		&12,%d3		# inc d3 to next pwrten table entry
+	tst.l		%d0		# test if LEN is zero
+	bne.b		l_loop		# if not, loop
+#
+# 10^(LEN-1) is computed for this test and A14.  If the input was
+# denormalized, check only the case in which YINT > 10^LEN.
+#
+	tst.b		BINDEC_FLG(%a6)	# check if input was norm
+	beq.b		A13_con		# if norm, continue with checking
+	fabs.x		%fp0		# take abs of YINT
+	bra		test_2
+#
+# Compare abs(YINT) to 10^(LEN-1) and 10^LEN
+#
+A13_con:
+	fabs.x		%fp0		# take abs of YINT
+	fcmp.x		%fp0,%fp2	# compare abs(YINT) with 10^(LEN-1)
+	fbge.w		test_2		# if greater, do next test
+	subq.l		&1,%d6		# subtract 1 from ILOG
+	mov.w		&1,%d5		# set ICTR
+	fmov.l		&rm_mode*0x10,%fpcr	# set rmode to RM
+	fmul.s		FTEN(%pc),%fp2	# compute 10^LEN
+	bra.w		A6_str		# return to A6 and recompute YINT
+test_2:
+	fmul.s		FTEN(%pc),%fp2	# compute 10^LEN
+	fcmp.x		%fp0,%fp2	# compare abs(YINT) with 10^LEN
+	fblt.w		A14_st		# if less, all is ok, go to A14
+	fbgt.w		fix_ex		# if greater, fix and redo
+	fdiv.s		FTEN(%pc),%fp0	# if equal, divide by 10
+	addq.l		&1,%d6		# and inc ILOG
+	bra.b		A14_st		# and continue elsewhere
+fix_ex:
+	addq.l		&1,%d6		# increment ILOG by 1
+	mov.w		&1,%d5		# set ICTR
+	fmov.l		&rm_mode*0x10,%fpcr	# set rmode to RM
+	bra.w		A6_str		# return to A6 and recompute YINT
+#
+# Since ICTR <> 0, we have already been through one adjustment,
+# and shouldn't have another; this is to check if abs(YINT) = 10^LEN.
+# 10^LEN is again computed using whatever table is in a1 since the
+# value calculated cannot be inexact.
+#
+not_zr:
+	fmov.s		FONE(%pc),%fp2	# init fp2 to 1.0
+	mov.l		%d4,%d0		# put LEN in d0
+	clr.l		%d3		# clr table index
+z_loop:
+	lsr.l		&1,%d0		# shift next bit into carry
+	bcc.b		z_next		# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp2	# mul by 10**(d3_bit_no)
+z_next:
+	add.l		&12,%d3		# inc d3 to next pwrten table entry
+	tst.l		%d0		# test if LEN is zero
+	bne.b		z_loop		# if not, loop
+	fabs.x		%fp0		# get abs(YINT)
+	fcmp.x		%fp0,%fp2	# check if abs(YINT) = 10^LEN
+	fbneq.w		A14_st		# if not, skip this
+	fdiv.s		FTEN(%pc),%fp0	# divide abs(YINT) by 10
+	addq.l		&1,%d6		# and inc ILOG by 1
+	addq.l		&1,%d4		# and inc LEN
+	fmul.s		FTEN(%pc),%fp2	# if LEN++, then get 10^LEN
+
+# A14. Convert the mantissa to bcd.
+#      The binstr routine is used to convert the LEN digit
+#      mantissa to bcd in memory.  The input to binstr is
+#      to be a fraction; i.e. (mantissa)/10^LEN and adjusted
+#      such that the decimal point is to the left of bit 63.
+#      The bcd digits are stored in the correct position in
+#      the final string area in memory.
+#
+#
+# Register usage:
+#	Input/Output
+#	d0: x/LEN call to binstr - final is 0
+#	d1: x/0
+#	d2: x/ms 32-bits of mant of abs(YINT)
+#	d3: x/ls 32-bits of mant of abs(YINT)
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG
+#	d7: k-factor/Unchanged
+#	a0: pointer into memory for packed bcd string formation
+#	    /ptr to first mantissa byte in result string
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: int portion of Y/abs(YINT) adjusted
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: 10^LEN/Unchanged
+#	F_SCR1:x/Work area for final result
+#	F_SCR2:Y with original exponent/Unchanged
+#	L_SCR1:original USER_FPCR/Unchanged
+#	L_SCR2:first word of X packed/Unchanged
+
+A14_st:
+	fmov.l		&rz_mode*0x10,%fpcr	# force rz for conversion
+	fdiv.x		%fp2,%fp0	# divide abs(YINT) by 10^LEN
+	lea.l		FP_SCR0(%a6),%a0
+	fmov.x		%fp0,(%a0)	# move abs(YINT)/10^LEN to memory
+	mov.l		4(%a0),%d2	# move 2nd word of FP_RES to d2
+	mov.l		8(%a0),%d3	# move 3rd word of FP_RES to d3
+	clr.l		4(%a0)		# zero word 2 of FP_RES
+	clr.l		8(%a0)		# zero word 3 of FP_RES
+	mov.l		(%a0),%d0	# move exponent to d0
+	swap		%d0		# put exponent in lower word
+	beq.b		no_sft		# if zero, don't shift
+	sub.l		&0x3ffd,%d0	# sub bias less 2 to make fract
+	tst.l		%d0		# check if > 0
+	bgt.b		no_sft		# if so, don't shift
+	neg.l		%d0		# make exp positive
+m_loop:
+	lsr.l		&1,%d2		# shift d2:d3 right, add 0s
+	roxr.l		&1,%d3		# the number of places
+	dbf.w		%d0,m_loop	# given in d0
+no_sft:
+	tst.l		%d2		# check for mantissa of zero
+	bne.b		no_zr		# if not, go on
+	tst.l		%d3		# continue zero check
+	beq.b		zer_m		# if zero, go directly to binstr
+no_zr:
+	clr.l		%d1		# put zero in d1 for addx
+	add.l		&0x00000080,%d3	# inc at bit 7
+	addx.l		%d1,%d2		# continue inc
+	and.l		&0xffffff80,%d3	# strip off lsb not used by 882
+zer_m:
+	mov.l		%d4,%d0		# put LEN in d0 for binstr call
+	addq.l		&3,%a0		# a0 points to M16 byte in result
+	bsr		binstr		# call binstr to convert mant
+
+
+# A15. Convert the exponent to bcd.
+#      As in A14 above, the exp is converted to bcd and the
+#      digits are stored in the final string.
+#
+#      Digits are stored in L_SCR1(a6) on return from BINDEC as:
+#
+#	 32               16 15                0
+#	-----------------------------------------
+#	|  0 | e3 | e2 | e1 | e4 |  X |  X |  X |
+#	-----------------------------------------
+#
+# And are moved into their proper places in FP_SCR0.  If digit e4
+# is non-zero, OPERR is signaled.  In all cases, all 4 digits are
+# written as specified in the 881/882 manual for packed decimal.
+#
+# Register usage:
+#	Input/Output
+#	d0: x/LEN call to binstr - final is 0
+#	d1: x/scratch (0);shift count for final exponent packing
+#	d2: x/ms 32-bits of exp fraction/scratch
+#	d3: x/ls 32-bits of exp fraction
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG
+#	d7: k-factor/Unchanged
+#	a0: ptr to result string/ptr to L_SCR1(a6)
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: abs(YINT) adjusted/float(ILOG)
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: 10^LEN/Unchanged
+#	F_SCR1:Work area for final result/BCD result
+#	F_SCR2:Y with original exponent/ILOG/10^4
+#	L_SCR1:original USER_FPCR/Exponent digits on return from binstr
+#	L_SCR2:first word of X packed/Unchanged
+
+A15_st:
+	tst.b		BINDEC_FLG(%a6)	# check for denorm
+	beq.b		not_denorm
+	ftest.x		%fp0		# test for zero
+	fbeq.w		den_zero	# if zero, use k-factor or 4933
+	fmov.l		%d6,%fp0	# float ILOG
+	fabs.x		%fp0		# get abs of ILOG
+	bra.b		convrt
+den_zero:
+	tst.l		%d7		# check sign of the k-factor
+	blt.b		use_ilog	# if negative, use ILOG
+	fmov.s		F4933(%pc),%fp0	# force exponent to 4933
+	bra.b		convrt		# do it
+use_ilog:
+	fmov.l		%d6,%fp0	# float ILOG
+	fabs.x		%fp0		# get abs of ILOG
+	bra.b		convrt
+not_denorm:
+	ftest.x		%fp0		# test for zero
+	fbneq.w		not_zero	# if zero, force exponent
+	fmov.s		FONE(%pc),%fp0	# force exponent to 1
+	bra.b		convrt		# do it
+not_zero:
+	fmov.l		%d6,%fp0	# float ILOG
+	fabs.x		%fp0		# get abs of ILOG
+convrt:
+	fdiv.x		24(%a1),%fp0	# compute ILOG/10^4
+	fmov.x		%fp0,FP_SCR1(%a6)	# store fp0 in memory
+	mov.l		4(%a2),%d2	# move word 2 to d2
+	mov.l		8(%a2),%d3	# move word 3 to d3
+	mov.w		(%a2),%d0	# move exp to d0
+	beq.b		x_loop_fin	# if zero, skip the shift
+	sub.w		&0x3ffd,%d0	# subtract off bias
+	neg.w		%d0		# make exp positive
+x_loop:
+	lsr.l		&1,%d2		# shift d2:d3 right
+	roxr.l		&1,%d3		# the number of places
+	dbf.w		%d0,x_loop	# given in d0
+x_loop_fin:
+	clr.l		%d1		# put zero in d1 for addx
+	add.l		&0x00000080,%d3	# inc at bit 7
+	addx.l		%d1,%d2		# continue inc
+	and.l		&0xffffff80,%d3	# strip off lsb not used by 882
+	mov.l		&4,%d0		# put 4 in d0 for binstr call
+	lea.l		L_SCR1(%a6),%a0	# a0 is ptr to L_SCR1 for exp digits
+	bsr		binstr		# call binstr to convert exp
+	mov.l		L_SCR1(%a6),%d0	# load L_SCR1 lword to d0
+	mov.l		&12,%d1		# use d1 for shift count
+	lsr.l		%d1,%d0		# shift d0 right by 12
+	bfins		%d0,FP_SCR0(%a6){&4:&12}	# put e3:e2:e1 in FP_SCR0
+	lsr.l		%d1,%d0		# shift d0 right by 12
+	bfins		%d0,FP_SCR0(%a6){&16:&4}	# put e4 in FP_SCR0
+	tst.b		%d0		# check if e4 is zero
+	beq.b		A16_st		# if zero, skip rest
+	or.l		&opaop_mask,USER_FPSR(%a6)	# set OPERR & AIOP in USER_FPSR
+
+
+# A16. Write sign bits to final string.
+#	   Sigma is bit 31 of initial value; RHO is bit 31 of d6 (ILOG).
+#
+# Register usage:
+#	Input/Output
+#	d0: x/scratch - final is x
+#	d2: x/x
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG/ILOG adjusted
+#	d7: k-factor/Unchanged
+#	a0: ptr to L_SCR1(a6)/Unchanged
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: float(ILOG)/Unchanged
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: 10^LEN/Unchanged
+#	F_SCR1:BCD result with correct signs
+#	F_SCR2:ILOG/10^4
+#	L_SCR1:Exponent digits on return from binstr
+#	L_SCR2:first word of X packed/Unchanged
+
+A16_st:
+	clr.l		%d0		# clr d0 for collection of signs
+	and.b		&0x0f,FP_SCR0(%a6)	# clear first nibble of FP_SCR0
+	tst.l		L_SCR2(%a6)	# check sign of original mantissa
+	bge.b		mant_p		# if pos, don't set SM
+	mov.l		&2,%d0		# move 2 in to d0 for SM
+mant_p:
+	tst.l		%d6		# check sign of ILOG
+	bge.b		wr_sgn		# if pos, don't set SE
+	addq.l		&1,%d0		# set bit 0 in d0 for SE
+wr_sgn:
+	bfins		%d0,FP_SCR0(%a6){&0:&2}	# insert SM and SE into FP_SCR0
+
+# Clean up and restore all registers used.
+
+	fmov.l		&0,%fpsr	# clear possible inex2/ainex bits
+	fmovm.x		(%sp)+,&0xe0	#  {%fp0-%fp2}
+	movm.l		(%sp)+,&0x4fc	#  {%d2-%d7/%a2}
+	rts
+
+	global		PTENRN
+PTENRN:
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
+
+	global		PTENRP
+PTENRP:
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D6	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C18	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
+
+	global		PTENRM
+PTENRM:
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59D	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CDF	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8D	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C6	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE4	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979A	# 10 ^ 4096
+
+#########################################################################
+# binstr(): Converts a 64-bit binary integer to bcd.			#
+#									#
+# INPUT *************************************************************** #
+#	d2:d3 = 64-bit binary integer					#
+#	d0    = desired length (LEN)					#
+#	a0    = pointer to start in memory for bcd characters		#
+#		(This pointer must point to byte 4 of the first		#
+#		 lword of the packed decimal memory string.)		#
+#									#
+# OUTPUT ************************************************************** #
+#	a0 = pointer to LEN bcd digits representing the 64-bit integer.	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The 64-bit binary is assumed to have a decimal point before	#
+#	bit 63.  The fraction is multiplied by 10 using a mul by 2	#
+#	shift and a mul by 8 shift.  The bits shifted out of the	#
+#	msb form a decimal digit.  This process is iterated until	#
+#	LEN digits are formed.						#
+#									#
+# A1. Init d7 to 1.  D7 is the byte digit counter, and if 1, the	#
+#     digit formed will be assumed the least significant.  This is	#
+#     to force the first byte formed to have a 0 in the upper 4 bits.	#
+#									#
+# A2. Beginning of the loop:						#
+#     Copy the fraction in d2:d3 to d4:d5.				#
+#									#
+# A3. Multiply the fraction in d2:d3 by 8 using bit-field		#
+#     extracts and shifts.  The three msbs from d2 will go into d1.	#
+#									#
+# A4. Multiply the fraction in d4:d5 by 2 using shifts.  The msb	#
+#     will be collected by the carry.					#
+#									#
+# A5. Add using the carry the 64-bit quantities in d2:d3 and d4:d5	#
+#     into d2:d3.  D1 will contain the bcd digit formed.		#
+#									#
+# A6. Test d7.  If zero, the digit formed is the ms digit.  If non-	#
+#     zero, it is the ls digit.  Put the digit in its place in the	#
+#     upper word of d0.  If it is the ls digit, write the word		#
+#     from d0 to memory.						#
+#									#
+# A7. Decrement d6 (LEN counter) and repeat the loop until zero.	#
+#									#
+#########################################################################
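+
+# Worked example (illustrative): for a fraction of 0.6, d2:d3 holds
+# 0x99999999:0x9999999A (0.6 * 2^64, rounded).  One pass computes
+# 8f + 2f = 10f = 6.0: the x8 shift spills out 100b (4), the x2 shift
+# carries out 1, and adding the two shifted fractions carries 1 more,
+# so the digit formed is 6 and the remaining fraction is ~0.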
+
+#	Implementation Notes:
+#
+#	The registers are used as follows:
+#
+#		d0: LEN counter
+#		d1: temp used to form the digit
+#		d2: upper 32-bits of fraction for mul by 8
+#		d3: lower 32-bits of fraction for mul by 8
+#		d4: upper 32-bits of fraction for mul by 2
+#		d5: lower 32-bits of fraction for mul by 2
+#		d6: temp for bit-field extracts
+#		d7: byte digit formation word;digit count {0,1}
+#		a0: pointer into memory for packed bcd string formation
+#
+
+	global		binstr
+binstr:
+	movm.l		&0xff00,-(%sp)	#  {%d0-%d7}
+
+#
+# A1: Init d7
+#
+	mov.l		&1,%d7		# init d7 for second digit
+	subq.l		&1,%d0		# for dbf d0 would have LEN+1 passes
+#
+# A2. Copy d2:d3 to d4:d5.  Start loop.
+#
+loop:
+	mov.l		%d2,%d4		# copy the fraction before muls
+	mov.l		%d3,%d5		# to d4:d5
+#
+# A3. Multiply d2:d3 by 8; extract msbs into d1.
+#
+	bfextu		%d2{&0:&3},%d1	# copy 3 msbs of d2 into d1
+	asl.l		&3,%d2		# shift d2 left by 3 places
+	bfextu		%d3{&0:&3},%d6	# copy 3 msbs of d3 into d6
+	asl.l		&3,%d3		# shift d3 left by 3 places
+	or.l		%d6,%d2		# or in msbs from d3 into d2
+#
+# A4. Multiply d4:d5 by 2; add carry out to d1.
+#
+	asl.l		&1,%d5		# mul d5 by 2
+	roxl.l		&1,%d4		# mul d4 by 2
+	swap		%d6		# put 0 in d6 lower word
+	addx.w		%d6,%d1		# add in extend from mul by 2
+#
+# A5. Add mul by 8 to mul by 2.  D1 contains the digit formed.
+#
+	add.l		%d5,%d3		# add lower 32 bits
+	nop				# ERRATA FIX #13 (Rev. 1.2 6/6/90)
+	addx.l		%d4,%d2		# add with extend upper 32 bits
+	nop				# ERRATA FIX #13 (Rev. 1.2 6/6/90)
+	addx.w		%d6,%d1		# add in extend from add to d1
+	swap		%d6		# with d6 = 0; put 0 in upper word
+#
+# A6. Test d7 and branch.
+#
+	tst.w		%d7		# if zero, store digit & to loop
+	beq.b		first_d		# if non-zero, form byte & write
+sec_d:
+	swap		%d7		# bring first digit to word d7b
+	asl.w		&4,%d7		# first digit in upper 4 bits d7b
+	add.w		%d1,%d7		# add in ls digit to d7b
+	mov.b		%d7,(%a0)+	# store d7b byte in memory
+	swap		%d7		# put LEN counter in word d7a
+	clr.w		%d7		# set d7a to signal no digits done
+	dbf.w		%d0,loop	# do loop some more!
+	bra.b		end_bstr	# finished, so exit
+first_d:
+	swap		%d7		# put digit word in d7b
+	mov.w		%d1,%d7		# put new digit in d7b
+	swap		%d7		# put LEN counter in word d7a
+	addq.w		&1,%d7		# set d7a to signal first digit done
+	dbf.w		%d0,loop	# do loop some more!
+	swap		%d7		# put last digit in string
+	lsl.w		&4,%d7		# move it to upper 4 bits
+	mov.b		%d7,(%a0)+	# store it in memory string
+#
+# Clean up and return. The bcd string has been written to memory.
+#
+end_bstr:
+	movm.l		(%sp)+,&0xff	#  {%d0-%d7}
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	facc_in_b(): dmem_read_byte failed				#
+#	facc_in_w(): dmem_read_word failed				#
+#	facc_in_l(): dmem_read_long failed				#
+#	facc_in_d(): dmem_read of dbl prec failed			#
+#	facc_in_x(): dmem_read of ext prec failed			#
+#									#
+#	facc_out_b(): dmem_write_byte failed				#
+#	facc_out_w(): dmem_write_word failed				#
+#	facc_out_l(): dmem_write_long failed				#
+#	facc_out_d(): dmem_write of dbl prec failed			#
+#	facc_out_x(): dmem_write of ext prec failed			#
+#									#
+# XREF ****************************************************************	#
+#	_real_access() - exit through access error handler		#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Flow jumps here when an FP data fetch call gets an error	#
+# result. This means the operating system wants an access error frame	#
+# made out of the current exception stack frame.			#
+#	So, we first call restore() which makes sure that any updated	#
+# -(an)+ register gets returned to its pre-exception value and then	#
+# we change the stack to an access error stack frame.			#
+#									#
+#########################################################################
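+
+# note: the FSLW constants written below encode the size (byte, word,
+# long, or double/extended) and direction (read vs. write) of the
+# faulted transfer in the format the access error handler expects.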
+
+facc_in_b:
+	movq.l		&0x1,%d0			# one byte
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0121,EXC_VOFF(%a6)		# set FSLW
+	bra.w		facc_finish
+
+facc_in_w:
+	movq.l		&0x2,%d0			# two bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0141,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_in_l:
+	movq.l		&0x4,%d0			# four bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0101,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_in_d:
+	movq.l		&0x8,%d0			# eight bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0161,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_in_x:
+	movq.l		&0xc,%d0			# twelve bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0161,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+################################################################
+
+facc_out_b:
+	movq.l		&0x1,%d0			# one byte
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00a1,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_w:
+	movq.l		&0x2,%d0			# two bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00c1,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_l:
+	movq.l		&0x4,%d0			# four bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x0081,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_d:
+	movq.l		&0x8,%d0			# eight bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00e1,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_x:
+	mov.l		&0xc,%d0			# twelve bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00e1,EXC_VOFF(%a6)		# set FSLW
+
+# here's where we actually create the access error frame from the
+# current exception stack frame.
+facc_finish:
+	mov.l		USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	mov.l		(%sp),-(%sp)		# store SR, hi(PC)
+	mov.l		0x8(%sp),0x4(%sp)	# store lo(PC)
+	mov.l		0xc(%sp),0x8(%sp)	# store EA
+	mov.l		&0x00000001,0xc(%sp)	# store FSLW
+	mov.w		0x6(%sp),0xc(%sp)	# fix FSLW (size)
+	mov.w		&0x4008,0x6(%sp)	# store voff
+
+	btst		&0x5,(%sp)		# supervisor or user mode?
+	beq.b		facc_out2		# user
+	bset		&0x2,0xd(%sp)		# set supervisor TM bit
+
+facc_out2:
+	bra.l		_real_access
+
+##################################################################
+
+# if the effective addressing mode was predecrement or postincrement,
+# the emulation has already changed the address register to the correct
+# post-instruction value. but since we're exiting to the access error
+# handler, An must be returned to its pre-instruction value.
+# we do that here.
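+#
+# example (illustrative): if "fmove.x (%a2)+,%fp0" faults on the data
+# read, a2 has already been advanced by 12; the caller passes the
+# access size in d0 (12 here), and rest_inc below subtracts it to
+# recover the pre-instruction a2.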
+restore:
+	mov.b		EXC_OPWORD+0x1(%a6),%d1
+	andi.b		&0x38,%d1		# extract opmode
+	cmpi.b		%d1,&0x18		# postinc?
+	beq.w		rest_inc
+	cmpi.b		%d1,&0x20		# predec?
+	beq.w		rest_dec
+	rts
+
+rest_inc:
+	mov.b		EXC_OPWORD+0x1(%a6),%d1
+	andi.w		&0x0007,%d1		# fetch An
+
+	mov.w		(tbl_rest_inc.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_rest_inc.b,%pc,%d1.w*1)
+
+tbl_rest_inc:
+	short		ri_a0 - tbl_rest_inc
+	short		ri_a1 - tbl_rest_inc
+	short		ri_a2 - tbl_rest_inc
+	short		ri_a3 - tbl_rest_inc
+	short		ri_a4 - tbl_rest_inc
+	short		ri_a5 - tbl_rest_inc
+	short		ri_a6 - tbl_rest_inc
+	short		ri_a7 - tbl_rest_inc
+
+ri_a0:
+	sub.l		%d0,EXC_DREGS+0x8(%a6)	# fix stacked a0
+	rts
+ri_a1:
+	sub.l		%d0,EXC_DREGS+0xc(%a6)	# fix stacked a1
+	rts
+ri_a2:
+	sub.l		%d0,%a2			# fix a2
+	rts
+ri_a3:
+	sub.l		%d0,%a3			# fix a3
+	rts
+ri_a4:
+	sub.l		%d0,%a4			# fix a4
+	rts
+ri_a5:
+	sub.l		%d0,%a5			# fix a5
+	rts
+ri_a6:
+	sub.l		%d0,(%a6)		# fix stacked a6
+	rts
+# if it's an fmove out instruction, we don't have to fix a7
+# because we haven't changed it yet. if it's an opclass two
+# instruction (data moved in) and the exception occurred in supervisor
+# mode, then a7 also wasn't updated. if it was user mode, then
+# restore the correct a7, which is currently in the USP.
+ri_a7:
+	cmpi.b		EXC_VOFF(%a6),&0x30	# move in or out?
+	bne.b		ri_a7_done		# out
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.b		ri_a7_done		# supervisor
+	movc		%usp,%a0		# restore USP
+	sub.l		%d0,%a0
+	movc		%a0,%usp
+ri_a7_done:
+	rts
+
+# need to invert adjustment value if the <ea> was predec
+rest_dec:
+	neg.l		%d0
+	bra.b		rest_inc
diff --git a/arch/m68k/ifpsp060/src/ftest.S b/arch/m68k/ifpsp060/src/ftest.S
new file mode 100644
index 0000000..2edcbae
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/ftest.S
@@ -0,0 +1,1456 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#############################################
+set	SREGS,		-64
+set	IREGS,		-128
+set	IFPREGS,	-224
+set	SFPREGS,	-320
+set	IFPCREGS,	-332
+set	SFPCREGS,	-344
+set	ICCR,		-346
+set	SCCR,		-348
+set	TESTCTR,	-352
+set	DATA,		-384
+
+#############################################
+TESTTOP:
+	bra.l		_060TESTS_
+	short		0x0000
+
+	bra.l		_060TESTS_unimp
+	short		0x0000
+
+	bra.l		_060TESTS_enable
+	short		0x0000
+
+start_str:
+	string		"Testing 68060 FPSP started:\n"
+
+start_str_unimp:
+	string		"Testing 68060 FPSP unimplemented instruction started:\n"
+
+start_str_enable:
+	string		"Testing 68060 FPSP exception enabled started:\n"
+
+pass_str:
+	string		"passed\n"
+
+fail_str:
+	string		" failed\n"
+
+	align		0x4
+chk_test:
+	tst.l		%d0
+	bne.b		test_fail
+test_pass:
+	pea		pass_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+	rts
+test_fail:
+	mov.l		%d1,-(%sp)
+	bsr.l		_print_num
+	addq.l		&0x4,%sp
+
+	pea		fail_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+	rts
+
+#############################################
+_060TESTS_:
+	link		%a6,&-384
+
+	movm.l		&0x3f3c,-(%sp)
+	fmovm.x		&0xff,-(%sp)
+
+	pea		start_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+### effadd
+	clr.l		TESTCTR(%a6)
+	pea		effadd_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		effadd_0
+
+	bsr.l		chk_test
+
+### unsupp
+	clr.l		TESTCTR(%a6)
+	pea		unsupp_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		unsupp_0
+
+	bsr.l		chk_test
+
+### ovfl non-maskable
+	clr.l		TESTCTR(%a6)
+	pea		ovfl_nm_str(%pc)
+	bsr.l		_print_str
+	bsr.l		ovfl_nm_0
+
+	bsr.l		chk_test
+
+### unfl non-maskable
+	clr.l		TESTCTR(%a6)
+	pea		unfl_nm_str(%pc)
+	bsr.l		_print_str
+	bsr.l		unfl_nm_0
+
+	bsr.l		chk_test
+
+	movm.l		(%sp)+,&0x3cfc
+	fmovm.x		(%sp)+,&0xff
+
+	unlk		%a6
+	rts
+
+_060TESTS_unimp:
+	link		%a6,&-384
+
+	movm.l		&0x3f3c,-(%sp)
+	fmovm.x		&0xff,-(%sp)
+
+	pea		start_str_unimp(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+### unimp
+	clr.l		TESTCTR(%a6)
+	pea		unimp_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		unimp_0
+
+	bsr.l		chk_test
+
+	movm.l		(%sp)+,&0x3cfc
+	fmovm.x		(%sp)+,&0xff
+
+	unlk		%a6
+	rts
+
+_060TESTS_enable:
+	link		%a6,&-384
+
+	movm.l		&0x3f3c,-(%sp)
+	fmovm.x		&0xff,-(%sp)
+
+	pea		start_str_enable(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+### snan
+	clr.l		TESTCTR(%a6)
+	pea		snan_str(%pc)
+	bsr.l		_print_str
+	bsr.l		snan_0
+
+	bsr.l		chk_test
+
+### operr
+	clr.l		TESTCTR(%a6)
+	pea		operr_str(%pc)
+	bsr.l		_print_str
+	bsr.l		operr_0
+
+	bsr.l		chk_test
+
+### ovfl
+	clr.l		TESTCTR(%a6)
+	pea		ovfl_str(%pc)
+	bsr.l		_print_str
+	bsr.l		ovfl_0
+
+	bsr.l		chk_test
+
+### unfl
+	clr.l		TESTCTR(%a6)
+	pea		unfl_str(%pc)
+	bsr.l		_print_str
+	bsr.l		unfl_0
+
+	bsr.l		chk_test
+
+### dz
+	clr.l		TESTCTR(%a6)
+	pea		dz_str(%pc)
+	bsr.l		_print_str
+	bsr.l		dz_0
+
+	bsr.l		chk_test
+
+### inexact
+	clr.l		TESTCTR(%a6)
+	pea		inex_str(%pc)
+	bsr.l		_print_str
+	bsr.l		inex_0
+
+	bsr.l		chk_test
+
+	movm.l		(%sp)+,&0x3cfc
+	fmovm.x		(%sp)+,&0xff
+
+	unlk		%a6
+	rts
+
+#############################################
+#############################################
+
+unimp_str:
+	string		"\tUnimplemented FP instructions..."
+
+	align		0x4
+unimp_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x40000000,DATA+0x0(%a6)
+	mov.l		&0xc90fdaa2,DATA+0x4(%a6)
+	mov.l		&0x2168c235,DATA+0x8(%a6)
+
+	mov.w		&0x0000,%cc
+unimp_0_pc:
+	fsin.x		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0xbfbf0000,IFPREGS+0x0(%a6)
+	mov.l		&0x80000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x08000208,IFPCREGS+0x4(%a6)
+	lea		unimp_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+unimp_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x3ffe0000,DATA+0x0(%a6)
+	mov.l		&0xc90fdaa2,DATA+0x4(%a6)
+	mov.l		&0x2168c235,DATA+0x8(%a6)
+
+	mov.w		&0x0000,%cc
+unimp_1_pc:
+	ftan.x		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x3fff0000,IFPREGS+0x0(%a6)
+	mov.l		&0x80000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x00000208,IFPCREGS+0x4(%a6)
+	lea		unimp_1_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# fmovecr
+unimp_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.w		&0x0000,%cc
+unimp_2_pc:
+	fmovcr.x	&0x31,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x40000000,IFPREGS+0x0(%a6)
+	mov.l		&0x935d8ddd,IFPREGS+0x4(%a6)
+	mov.l		&0xaaa8ac17,IFPREGS+0x8(%a6)
+	mov.l		&0x00000208,IFPCREGS+0x4(%a6)
+	lea		unimp_2_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# fscc
+unimp_3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	fmov.l		&0x0f000000,%fpsr
+	mov.l		&0x00,%d7
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.w		&0x0000,%cc
+unimp_3_pc:
+	fsgt		%d7
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.l		&0x0f008080,IFPCREGS+0x4(%a6)
+	lea		unimp_3_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# fdbcc
+unimp_4:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	fmov.l		&0x0f000000,%fpsr
+	mov.l		&0x2,%d7
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.w		&0x0000,%cc
+unimp_4_pc:
+	fdbgt.w		%d7,unimp_4_pc
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.w		&0xffff,IREGS+28+2(%a6)
+	mov.l		&0x0f008080,IFPCREGS+0x4(%a6)
+	lea		unimp_4_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# ftrapcc
+unimp_5:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	fmov.l		&0x0f000000,%fpsr
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.w		&0x0000,%cc
+unimp_5_pc:
+	ftpgt.l		&0xabcdef01
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.l		&0x0f008080,IFPCREGS+0x4(%a6)
+	lea		unimp_5_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#############################################
+
+effadd_str:
+	string		"\tUnimplemented <ea>..."
+
+	align		0x4
+effadd_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmov.b		&0x2,%fp0
+
+	mov.w		&0x0000,%cc
+effadd_0_pc:
+	fmul.x		&0xc00000008000000000000000,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0xc0010000,IFPREGS+0x0(%a6)
+	mov.l		&0x80000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x08000000,IFPCREGS+0x4(%a6)
+	lea		effadd_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+effadd_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.w		&0x0000,%cc
+effadd_1_pc:
+	fabs.p		&0xc12300012345678912345678,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x3e660000,IFPREGS+0x0(%a6)
+	mov.l		&0xd0ed23e8,IFPREGS+0x4(%a6)
+	mov.l		&0xd14035bc,IFPREGS+0x8(%a6)
+	mov.l		&0x00000108,IFPCREGS+0x4(%a6)
+	lea		effadd_1_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+fmovml_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmovm.l		&0xffffffffffffffff,%fpcr,%fpsr
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.l		&0x0000fff0,IFPCREGS+0x0(%a6)
+	mov.l		&0x0ffffff8,IFPCREGS+0x4(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+fmovml_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmovm.l		&0xffffffffffffffff,%fpcr,%fpiar
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.l		&0x0000fff0,IFPCREGS+0x0(%a6)
+	mov.l		&0xffffffff,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+fmovml_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmovm.l		&0xffffffffffffffff,%fpsr,%fpiar
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.l		&0x0ffffff8,IFPCREGS+0x4(%a6)
+	mov.l		&0xffffffff,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+fmovml_3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmovm.l		&0xffffffffffffffffffffffff,%fpcr,%fpsr,%fpiar
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+	mov.l		&0x0000fff0,IFPCREGS+0x0(%a6)
+	mov.l		&0x0ffffff8,IFPCREGS+0x4(%a6)
+	mov.l		&0xffffffff,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# fmovmx dynamic
+fmovmx_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	fmov.b		&0x1,%fp0
+	fmov.b		&0x2,%fp1
+	fmov.b		&0x3,%fp2
+	fmov.b		&0x4,%fp3
+	fmov.b		&0x5,%fp4
+	fmov.b		&0x6,%fp5
+	fmov.b		&0x7,%fp6
+	fmov.b		&0x8,%fp7
+
+	fmov.l		&0x0,%fpiar
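+# (the low byte of d0, 0xaa, is the dynamic register mask; the pops
+# below expect fp1/fp3/fp5/fp7 on the stack)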
+	mov.l		&0xffffffaa,%d0
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0xffff,IREGS(%a6)
+
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+
+	mov.w		&0x0000,%cc
+
+	fmovm.x		%d0,-(%sp)
+
+	mov.w		%cc,SCCR(%a6)
+
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	fmov.s		&0x7f800000,%fp1
+	fmov.s		&0x7f800000,%fp3
+	fmov.s		&0x7f800000,%fp5
+	fmov.s		&0x7f800000,%fp7
+
+	fmov.x		(%sp)+,%fp1
+	fmov.x		(%sp)+,%fp3
+	fmov.x		(%sp)+,%fp5
+	fmov.x		(%sp)+,%fp7
+
+	movm.l		&0xffff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+fmovmx_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	fmov.b		&0x1,%fp0
+	fmov.b		&0x2,%fp1
+	fmov.b		&0x3,%fp2
+	fmov.b		&0x4,%fp3
+	fmov.b		&0x5,%fp4
+	fmov.b		&0x6,%fp5
+	fmov.b		&0x7,%fp6
+	fmov.b		&0x8,%fp7
+
+	fmov.x		%fp6,-(%sp)
+	fmov.x		%fp4,-(%sp)
+	fmov.x		%fp2,-(%sp)
+	fmov.x		%fp0,-(%sp)
+
+	fmovm.x		&0xff,IFPREGS(%a6)
+
+	fmov.s		&0x7f800000,%fp6
+	fmov.s		&0x7f800000,%fp4
+	fmov.s		&0x7f800000,%fp2
+	fmov.s		&0x7f800000,%fp0
+
+	fmov.l		&0x0,%fpiar
+	fmov.l		&0x0,%fpsr
+	mov.l		&0xffffffaa,%d0
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0xffff,IREGS(%a6)
+
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.w		&0x0000,%cc
+
+	fmovm.x		(%sp)+,%d0
+
+	mov.w		%cc,SCCR(%a6)
+
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	movm.l		&0xffff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+fmovmx_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	fmov.b		&0x1,%fp0
+	fmov.b		&0x2,%fp1
+	fmov.b		&0x3,%fp2
+	fmov.b		&0x4,%fp3
+	fmov.b		&0x5,%fp4
+	fmov.b		&0x6,%fp5
+	fmov.b		&0x7,%fp6
+	fmov.b		&0x8,%fp7
+
+	fmov.l		&0x0,%fpiar
+	mov.l		&0xffffff00,%d0
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0xffff,IREGS(%a6)
+
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+
+	mov.w		&0x0000,%cc
+
+	fmovm.x		%d0,-(%sp)
+
+	mov.w		%cc,SCCR(%a6)
+
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	movm.l		&0xffff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+###########################################################
+
+# This test will take a non-maskable overflow directly.
+ovfl_nm_str:
+	string		"\tNon-maskable overflow..."
+
+	align		0x4
+ovfl_nm_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmov.b		&0x2,%fp0
+	mov.l		&0x7ffe0000,DATA+0x0(%a6)
+	mov.l		&0x80000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+
+	mov.w		&0x0000,%cc
+ovfl_nm_0_pc:
+	fmul.x		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x7fff0000,IFPREGS+0x0(%a6)
+	mov.l		&0x00000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x02001048,IFPCREGS+0x4(%a6)
+	lea		ovfl_nm_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+###########################################################
+
+# This test will take an overflow directly.
+ovfl_str:
+	string		"\tEnabled overflow..."
+
+	align		0x4
+ovfl_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmov.l		&0x00001000,%fpcr
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	fmov.b		&0x2,%fp0
+	mov.l		&0x7ffe0000,DATA+0x0(%a6)
+	mov.l		&0x80000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+
+	mov.w		&0x0000,%cc
+ovfl_0_pc:
+	fmul.x		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x7fff0000,IFPREGS+0x0(%a6)
+	mov.l		&0x00000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x02001048,IFPCREGS+0x4(%a6)
+	lea		ovfl_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+# This test will take an underflow directly.
+unfl_str:
+	string		"\tEnabled underflow..."
+
+	align		0x4
+unfl_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmov.l		&0x00000800,%fpcr
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x00000000,DATA+0x0(%a6)
+	mov.l		&0x80000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+	fmovm.x		DATA(%a6),&0x80
+
+	mov.w		&0x0000,%cc
+unfl_0_pc:
+	fdiv.b		&0x2,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x00000000,IFPREGS+0x0(%a6)
+	mov.l		&0x40000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x00000800,IFPCREGS+0x4(%a6)
+	lea		unfl_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+# This test will take a non-maskable underflow directly.
+unfl_nm_str:
+	string		"\tNon-maskable underflow..."
+
+	align		0x4
+unfl_nm_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x00000000,DATA+0x0(%a6)
+	mov.l		&0x80000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+	fmovm.x		DATA(%a6),&0x80
+
+	mov.w		&0x0000,%cc
+unfl_nm_0_pc:
+	fdiv.b		&0x2,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x00000000,IFPREGS+0x0(%a6)
+	mov.l		&0x40000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x00000800,IFPCREGS+0x4(%a6)
+	lea		unfl_nm_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+inex_str:
+	string		"\tEnabled inexact..."
+
+	align		0x4
+inex_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmov.l		&0x00000200,%fpcr		# enable inexact
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x50000000,DATA+0x0(%a6)
+	mov.l		&0x80000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+	fmovm.x		DATA(%a6),&0x80
+
+	mov.w		&0x0000,%cc
+inex_0_pc:
+	fadd.b		&0x2,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x50000000,IFPREGS+0x0(%a6)
+	mov.l		&0x80000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x00000208,IFPCREGS+0x4(%a6)
+	lea		inex_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+snan_str:
+	string		"\tEnabled SNAN..."
+
+	align		0x4
+snan_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmov.l		&0x00004000,%fpcr		# enable SNAN
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0xffff0000,DATA+0x0(%a6)
+	mov.l		&0x00000000,DATA+0x4(%a6)
+	mov.l		&0x00000001,DATA+0x8(%a6)
+	fmovm.x		DATA(%a6),&0x80
+
+	mov.w		&0x0000,%cc
+snan_0_pc:
+	fadd.b		&0x2,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0xffff0000,IFPREGS+0x0(%a6)
+	mov.l		&0x00000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000001,IFPREGS+0x8(%a6)
+	mov.l		&0x09004080,IFPCREGS+0x4(%a6)
+	lea		snan_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+operr_str:
+	string		"\tEnabled OPERR..."
+
+	align		0x4
+operr_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmov.l		&0x00002000,%fpcr		# enable OPERR
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0xffff0000,DATA+0x0(%a6)
+	mov.l		&0x00000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+	fmovm.x		DATA(%a6),&0x80
+
+	mov.w		&0x0000,%cc
+operr_0_pc:
+	fadd.s		&0x7f800000,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0xffff0000,IFPREGS+0x0(%a6)
+	mov.l		&0x00000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x01002080,IFPCREGS+0x4(%a6)
+	lea		operr_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+dz_str:
+	string		"\tEnabled DZ..."
+
+	align		0x4
+dz_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmov.l		&0x00000400,%fpcr		# enable DZ
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x40000000,DATA+0x0(%a6)
+	mov.l		&0x80000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+	fmovm.x		DATA(%a6),&0x80
+
+	mov.w		&0x0000,%cc
+dz_0_pc:
+	fdiv.b		&0x0,%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x40000000,IFPREGS+0x0(%a6)
+	mov.l		&0x80000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x02000410,IFPCREGS+0x4(%a6)
+	lea		dz_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+#####################################################################
+
+unsupp_str:
+	string		"\tUnimplemented data type/format..."
+
+# an unnormalized number
+	align		0x4
+unsupp_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0xc03f0000,DATA+0x0(%a6)
+	mov.l		&0x00000000,DATA+0x4(%a6)
+	mov.l		&0x00000001,DATA+0x8(%a6)
+	fmov.b		&0x2,%fp0
+	mov.w		&0x0000,%cc
+unsupp_0_pc:
+	fmul.x		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0xc0010000,IFPREGS+0x0(%a6)
+	mov.l		&0x80000000,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x08000000,IFPCREGS+0x4(%a6)
+	lea		unsupp_0_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# a denormalized number
+unsupp_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0x80000000,DATA+0x0(%a6)
+	mov.l		&0x01000000,DATA+0x4(%a6)
+	mov.l		&0x00000000,DATA+0x8(%a6)
+	fmov.l		&0x7fffffff,%fp0
+
+	mov.w		&0x0000,%cc
+unsupp_1_pc:
+	fmul.x		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x80170000,IFPREGS+0x0(%a6)
+	mov.l		&0xfffffffe,IFPREGS+0x4(%a6)
+	mov.l		&0x00000000,IFPREGS+0x8(%a6)
+	mov.l		&0x08000000,IFPCREGS+0x4(%a6)
+	lea		unsupp_1_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+# packed
+unsupp_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	fmovm.x		DEF_FPREGS(%pc),&0xff
+	fmovm.l		DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+	mov.w		&0x0000,ICCR(%a6)
+	movm.l		&0x7fff,IREGS(%a6)
+	fmovm.x		&0xff,IFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+	mov.l		&0xc1230001,DATA+0x0(%a6)
+	mov.l		&0x23456789,DATA+0x4(%a6)
+	mov.l		&0x12345678,DATA+0x8(%a6)
+
+	mov.w		&0x0000,%cc
+unsupp_2_pc:
+	fabs.p		DATA(%a6),%fp0
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	fmovm.x		&0xff,SFPREGS(%a6)
+	fmovm.l		%fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+	mov.l		&0x3e660000,IFPREGS+0x0(%a6)
+	mov.l		&0xd0ed23e8,IFPREGS+0x4(%a6)
+	mov.l		&0xd14035bc,IFPREGS+0x8(%a6)
+	mov.l		&0x00000108,IFPCREGS+0x4(%a6)
+	lea		unsupp_2_pc(%pc),%a0
+	mov.l		%a0,IFPCREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	bsr.l		chkfpregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+###########################################################
+###########################################################
+
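+# chkregs(): compare the 15 integer registers (d0-d7/a0-a6) captured
+# before (IREGS) and after (SREGS) each test, then compare the
+# ICCR/SCCR condition code images. returns d0 = 0 on match, 1 on
+# mismatch.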
+chkregs:
+	lea		IREGS(%a6),%a0
+	lea		SREGS(%a6),%a1
+	mov.l		&14,%d0
+chkregs_loop:
+	cmp.l		(%a0)+,(%a1)+
+	bne.l		chkregs_error
+	dbra.w		%d0,chkregs_loop
+
+	mov.w		ICCR(%a6),%d0
+	mov.w		SCCR(%a6),%d1
+	cmp.w		%d0,%d1
+	bne.l		chkregs_error
+
+	clr.l		%d0
+	rts
+
+chkregs_error:
+	movq.l		&0x1,%d0
+	rts
+
+error:
+	mov.l		TESTCTR(%a6),%d1
+	movq.l		&0x1,%d0
+	rts
+
+chkfpregs:
+	lea		IFPREGS(%a6),%a0
+	lea		SFPREGS(%a6),%a1
+	mov.l		&23,%d0
+chkfpregs_loop:
+	cmp.l		(%a0)+,(%a1)+
+	bne.l		chkfpregs_error
+	dbra.w		%d0,chkfpregs_loop
+
+	lea		IFPCREGS(%a6),%a0
+	lea		SFPCREGS(%a6),%a1
+	cmp.l		(%a0)+,(%a1)+
+	bne.l		chkfpregs_error
+	cmp.l		(%a0)+,(%a1)+
+	bne.l		chkfpregs_error
+	cmp.l		(%a0)+,(%a1)+
+	bne.l		chkfpregs_error
+
+	clr.l		%d0
+	rts
+
+chkfpregs_error:
+	movq.l		&0x1,%d0
+	rts
+
+DEF_REGS:
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+
+DEF_FPREGS:
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+
+DEF_FPCREGS:
+	long		0x00000000, 0x00000000, 0x00000000
+
+############################################################
+
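+# _print_str()/_print_num() dispatch through two longwords stored at
+# TESTTOP-0x80: each is presumably filled in, by whatever embeds this
+# test module, with the offset (from TESTTOP-0x80) of a print routine.
+# the pea/rtd sequence below jumps there without clobbering d0.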
+_print_str:
+	mov.l		%d0,-(%sp)
+	mov.l		(TESTTOP-0x80+0x0,%pc),%d0
+	pea		(TESTTOP-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+_print_num:
+	mov.l		%d0,-(%sp)
+	mov.l		(TESTTOP-0x80+0x4,%pc),%d0
+	pea		(TESTTOP-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+############################################################
diff --git a/arch/m68k/ifpsp060/src/ilsp.S b/arch/m68k/ifpsp060/src/ilsp.S
new file mode 100644
index 0000000..afa7422
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/ilsp.S
@@ -0,0 +1,932 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# litop.s:
+#	This file is appended to the top of the 060ILSP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located here.
+#
+
+	bra.l	_060LSP__idivs64_
+	short	0x0000
+	bra.l	_060LSP__idivu64_
+	short	0x0000
+
+	bra.l	_060LSP__imuls64_
+	short	0x0000
+	bra.l	_060LSP__imulu64_
+	short	0x0000
+
+	bra.l	_060LSP__cmp2_Ab_
+	short	0x0000
+	bra.l	_060LSP__cmp2_Aw_
+	short	0x0000
+	bra.l	_060LSP__cmp2_Al_
+	short	0x0000
+	bra.l	_060LSP__cmp2_Db_
+	short	0x0000
+	bra.l	_060LSP__cmp2_Dw_
+	short	0x0000
+	bra.l	_060LSP__cmp2_Dl_
+	short	0x0000
+
+# leave room for possible future additions.
+	align	0x200
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_060LSP__idivu64_(): Emulate 64-bit unsigned div instruction.	#
+#	_060LSP__idivs64_(): Emulate 64-bit signed div instruction.	#
+#									#
+#	This is the library version which is accessed as a subroutine	#
+#	and therefore does not work exactly like the 680X0 div{s,u}.l	#
+#	64-bit divide instruction.					#
+#									#
+# XREF ****************************************************************	#
+#	None.								#
+#									#
+# INPUT ***************************************************************	#
+#	0x4(sp)  = divisor						#
+#	0x8(sp)  = hi(dividend)						#
+#	0xc(sp)  = lo(dividend)						#
+#	0x10(sp) = pointer to location to place quotient/remainder	#
+#									#
+# OUTPUT **************************************************************	#
+#	0x10(sp) = points to location of remainder/quotient.		#
+#		   remainder is in first longword, quotient is in 2nd.	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If the operands are signed, make them unsigned and save the	#
+# sign info for later. Separate out special cases like divide-by-zero	#
+# or 32-bit divides if possible. Else, use a special math algorithm	#
+# to calculate the result.						#
+#	Restore sign info if signed instruction. Set the condition	#
+# codes before performing the final "rts". If the divisor was equal to	#
+# zero, then perform a divide-by-zero using a 16-bit divide		#
+# instruction implemented in hardware. This way, the operating system	#
+# can record that the event occurred even though the reported PC may	#
+# not point to the faulting instruction.				#
+#									#
+#########################################################################
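+
+# illustrative call sequence (hypothetical caller; "result", "hi_dvnd",
+# "lo_dvnd", and "dvsor" are stand-in labels; offsets are as seen by
+# the routine after the bsr):
+#	pea		result			# 0x10(sp): result pointer
+#	mov.l		lo_dvnd,-(%sp)		# 0xc(sp): lo(dividend)
+#	mov.l		hi_dvnd,-(%sp)		# 0x8(sp): hi(dividend)
+#	mov.l		dvsor,-(%sp)		# 0x4(sp): divisor
+#	bsr.l		_060LSP__idivu64_
+#	add.l		&0x10,%sp
+# afterwards, the remainder is at result+0 and the quotient at result+4.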
+
+set	POSNEG,		-1
+set	NDIVISOR,	-2
+set	NDIVIDEND,	-3
+set	DDSECOND,	-4
+set	DDNORMAL,	-8
+set	DDQUOTIENT,	-12
+set	DIV64_CC,	-16
+
+##########
+# divs.l #
+##########
+	global		_060LSP__idivs64_
+_060LSP__idivs64_:
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-16
+	movm.l		&0x3f00,-(%sp)		# save d2-d7
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,DIV64_CC(%a6)
+	st		POSNEG(%a6)		# signed operation
+	bra.b		ldiv64_cont
+
+##########
+# divu.l #
+##########
+	global		_060LSP__idivu64_
+_060LSP__idivu64_:
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-16
+	movm.l		&0x3f00,-(%sp)		# save d2-d7
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,DIV64_CC(%a6)
+	sf		POSNEG(%a6)		# unsigned operation
+
+ldiv64_cont:
+	mov.l		0x8(%a6),%d7		# fetch divisor
+
+	beq.w		ldiv64eq0		# divisor is = 0!!!
+
+	mov.l		0xc(%a6), %d5		# get dividend hi
+	mov.l		0x10(%a6), %d6		# get dividend lo
+
+# separate signed and unsigned divide
+	tst.b		POSNEG(%a6)		# signed or unsigned?
+	beq.b		ldspecialcases		# use positive divide
+
+# save the sign of the divisor
+# make divisor unsigned if it's negative
+	tst.l		%d7			# chk sign of divisor
+	slt		NDIVISOR(%a6)		# save sign of divisor
+	bpl.b		ldsgndividend
+	neg.l		%d7			# complement negative divisor
+
+# save the sign of the dividend
+# make dividend unsigned if it's negative
+ldsgndividend:
+	tst.l		%d5			# chk sign of hi(dividend)
+	slt		NDIVIDEND(%a6)		# save sign of dividend
+	bpl.b		ldspecialcases
+
+	mov.w		&0x0, %cc		# clear 'X' cc bit
+	negx.l		%d6			# complement signed dividend
+	negx.l		%d5
+
+# extract some special cases:
+#	- is (dividend == 0) ?
+#	- is (hi(dividend) == 0 && (divisor <= lo(dividend))) ? (32-bit div)
+ldspecialcases:
+	tst.l		%d5			# is (hi(dividend) == 0)
+	bne.b		ldnormaldivide		# no, so try it the long way
+
+	tst.l		%d6			# is (lo(dividend) == 0), too
+	beq.w		lddone			# yes, so (dividend == 0)
+
+	cmp.l		%d7,%d6			# is (divisor <= lo(dividend))
+	bls.b		ld32bitdivide		# yes, so use 32 bit divide
+
+	exg		%d5,%d6			# q = 0, r = dividend
+	bra.w		ldivfinish		# can't divide, we're done.
+
+ld32bitdivide:
+	tdivu.l		%d7, %d5:%d6		# it's only a 32/32 bit div!
+
+	bra.b		ldivfinish
+
+ldnormaldivide:
+# last special case:
+#	- is hi(dividend) >= divisor ? if yes, then overflow
+	cmp.l		%d7,%d5
+	bls.b		lddovf			# answer won't fit in 32 bits
+
+# perform the divide algorithm:
+	bsr.l		ldclassical		# do int divide
+
+# separate into signed and unsigned finishes.
+ldivfinish:
+	tst.b		POSNEG(%a6)		# do divs, divu separately
+	beq.b		lddone			# divu has no processing!!!
+
+# it was a divs.l, so ccode setting is a little more complicated...
+	tst.b		NDIVIDEND(%a6)		# remainder has same sign
+	beq.b		ldcc			# as dividend.
+	neg.l		%d5			# sgn(rem) = sgn(dividend)
+ldcc:
+	mov.b		NDIVISOR(%a6), %d0
+	eor.b		%d0, NDIVIDEND(%a6)	# chk if quotient is negative
+	beq.b		ldqpos			# branch to quot positive
+
+# 0x80000000 is the largest number representable as a 32-bit negative
+# number. the negative of 0x80000000 is 0x80000000.
+	cmpi.l		%d6, &0x80000000	# will (-quot) fit in 32 bits?
+	bhi.b		lddovf
+
+	neg.l		%d6			# make (-quot) 2's comp
+
+	bra.b		lddone
+
+ldqpos:
+	btst		&0x1f, %d6		# will (+quot) fit in 32 bits?
+	bne.b		lddovf
+
+lddone:
+# in this library version both results go out to memory, so the save
+# order below is fixed: remainder first, quotient second, matching the
+# output layout documented above.
+	andi.w		&0x10,DIV64_CC(%a6)
+	mov.w		DIV64_CC(%a6),%cc
+	tst.l		%d6			# may set 'N' ccode bit
+
+# here, the result is in %d5 (remainder) and %d6 (quotient). the current
+# strategy is to save the values at the location pointed to by the
+# pointer argument at 0x14(%a6).
+# use movm here to not disturb the condition codes.
+ldexit:
+	movm.l		&0x0060,([0x14,%a6])	# save result
+
+# EPILOGUE BEGIN ########################################################
+#	fmovm.l		(%sp)+,&0x0		# restore no fpregs
+	movm.l		(%sp)+,&0x00fc		# restore d2-d7
+	unlk		%a6
+# EPILOGUE END ##########################################################
+
+	rts
+
+# the result should be the unchanged dividend
+lddovf:
+	mov.l		0xc(%a6), %d5		# get dividend hi
+	mov.l		0x10(%a6), %d6		# get dividend lo
+
+	andi.w		&0x1c,DIV64_CC(%a6)
+	ori.w		&0x02,DIV64_CC(%a6)	# set 'V' ccode bit
+	mov.w		DIV64_CC(%a6),%cc
+
+	bra.b		ldexit
+
+ldiv64eq0:
+	mov.l		0xc(%a6),([0x14,%a6])
+	mov.l		0x10(%a6),([0x14,%a6],0x4)
+
+	mov.w		DIV64_CC(%a6),%cc
+
+# EPILOGUE BEGIN ########################################################
+#	fmovm.l		(%sp)+,&0x0		# restore no fpregs
+	movm.l		(%sp)+,&0x00fc		# restore d2-d7
+	unlk		%a6
+# EPILOGUE END ##########################################################
+
+	divu.w		&0x0,%d0		# force a divbyzero exception
+	rts
+
+###########################################################################
+#########################################################################
+# This routine uses the 'classical' Algorithm D from Donald Knuth's	#
+# Art of Computer Programming, vol II, Seminumerical Algorithms.	#
+# For this implementation b=2**16, and the target is U1U2U3U4/V1V2,	#
+# where U,V are words of the quadword dividend and longword divisor,	#
+# and U1, V1 are the most significant words.				#
+#									#
+# The most sig. longword of the 64 bit dividend must be in %d5, least	#
+# in %d6. The divisor must be in %d7, and the signed/unsigned flag	#
+# POSNEG(%a6) must be set (0x00=unsigned, 0xff=signed).		#
+# The quotient is returned in %d6, remainder in %d5, unless the		#
+# v (overflow) bit is set in the saved %ccr. If overflow, the dividend	#
+# is unchanged.								#
+#########################################################################
+ldclassical:
+# if the divisor msw is 0, use a simpler algorithm than the full-blown
+# one at lddknuth:
+
+	cmpi.l		%d7, &0xffff
+	bhi.b		lddknuth		# go use D. Knuth algorithm
+
+# Since the divisor is only a word (and larger than the mslw of the dividend),
+# a simpler algorithm may be used:
+# In the general case, four quotient words would be created by
+# dividing the divisor word into each dividend word. In this case,
+# the first two quotient words must be zero, or overflow would occur.
+# Since we already checked this case above, we can treat the most significant
+# longword of the dividend as (0) remainder (see Knuth) and merely complete
+# the last two divisions to get a quotient longword and word remainder:
+
+	clr.l		%d1
+	swap		%d5			# same as r*b if previous step rqd
+	swap		%d6			# get u3 to lsw position
+	mov.w		%d6, %d5		# rb + u3
+
+	divu.w		%d7, %d5
+
+	mov.w		%d5, %d1		# first quotient word
+	swap		%d6			# get u4
+	mov.w		%d6, %d5		# rb + u4
+
+	divu.w		%d7, %d5
+
+	swap		%d1
+	mov.w		%d5, %d1		# 2nd quotient 'digit'
+	clr.w		%d5
+	swap		%d5			# now remainder
+	mov.l		%d1, %d6		# and quotient
+
+	rts
+
+lddknuth:
+# In this algorithm, the divisor is treated as a 2 digit (word) number
+# which is divided into a 3 digit (word) dividend to get one quotient
+# digit (word). After subtraction, the dividend is shifted and the
+# process repeated. Before beginning, the divisor and dividend are
+# 'normalized' so that the process of estimating the quotient digit
+# will yield verifiably correct results.
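+#
+# e.g. (illustrative): a divisor of 0x00012345 is shifted left 15
+# places to 0x91a28000 so that V1 >= 0x8000; the dividend is shifted
+# by the same count (kept in DDNORMAL) and the remainder is shifted
+# back down at lddnlp.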
+
+	clr.l		DDNORMAL(%a6)		# count of shifts for normalization
+	clr.b		DDSECOND(%a6)		# clear flag for quotient digits
+	clr.l		%d1			# %d1 will hold trial quotient
+lddnchk:
+	btst		&31, %d7		# must we normalize? first word of
+	bne.b		lddnormalized		# divisor (V1) must be >= 65536/2
+	addq.l		&0x1, DDNORMAL(%a6)	# count normalization shifts
+	lsl.l		&0x1, %d7		# shift the divisor
+	lsl.l		&0x1, %d6		# shift u4,u3 with overflow to u2
+	roxl.l		&0x1, %d5		# shift u1,u2
+	bra.w		lddnchk
+lddnormalized:
+
+# Now calculate an estimate of the quotient words (msw first, then lsw).
+# The comments use subscripts for the first quotient digit determination.
+	mov.l		%d7, %d3		# divisor
+	mov.l		%d5, %d2		# dividend mslw
+	swap		%d2
+	swap		%d3
+	cmp.w		%d2, %d3		# V1 = U1 ?
+	bne.b		lddqcalc1
+	mov.w		&0xffff, %d1		# use max trial quotient word
+	bra.b		lddadj0
+lddqcalc1:
+	mov.l		%d5, %d1
+
+	divu.w		%d3, %d1		# use quotient of mslw/msw
+
+	andi.l		&0x0000ffff, %d1	# zero any remainder
+lddadj0:
+
+# now test the trial quotient and adjust. This step plus the
+# normalization assures (according to Knuth) that the trial
+# quotient will be at worst 1 too large.
+	mov.l		%d6, -(%sp)
+	clr.w		%d6			# word u3 left
+	swap		%d6			# in lsw position
+lddadj1: mov.l		%d7, %d3
+	mov.l		%d1, %d2
+	mulu.w		%d7, %d2		# V2q
+	swap		%d3
+	mulu.w		%d1, %d3		# V1q
+	mov.l		%d5, %d4		# U1U2
+	sub.l		%d3, %d4		# U1U2 - V1q
+
+	swap		%d4
+
+	mov.w		%d4,%d0
+	mov.w		%d6,%d4			# insert lower word (U3)
+
+	tst.w		%d0			# is upper word set?
+	bne.w		lddadjd1
+
+#	add.l		%d6, %d4		# (U1U2 - V1q) + U3
+
+	cmp.l		%d2, %d4
+	bls.b		lddadjd1		# is V2q > (U1U2-V1q) + U3 ?
+	subq.l		&0x1, %d1		# yes, decrement and recheck
+	bra.b		lddadj1
+lddadjd1:
+# now test the word by multiplying it by the divisor (V1V2) and comparing
+# the 3 digit (word) result with the current dividend words
+	mov.l		%d5, -(%sp)		# save %d5 (%d6 already saved)
+	mov.l		%d1, %d6
+	swap		%d6			# shift answer to ms 3 words
+	mov.l		%d7, %d5
+	bsr.l		ldmm2
+	mov.l		%d5, %d2		# now %d2,%d3 are trial*divisor
+	mov.l		%d6, %d3
+	mov.l		(%sp)+, %d5		# restore dividend
+	mov.l		(%sp)+, %d6
+	sub.l		%d3, %d6
+	subx.l		%d2, %d5		# subtract double precision
+	bcc		ldd2nd			# no carry, do next quotient digit
+	subq.l		&0x1, %d1		# q is one too large
+# need to add back divisor longword to current ms 3 digits of dividend
+# - according to Knuth, this is done only 2 out of 65536 times for random
+# divisor, dividend selection.
+	clr.l		%d2
+	mov.l		%d7, %d3
+	swap		%d3
+	clr.w		%d3			# %d3 now ls word of divisor
+	add.l		%d3, %d6		# aligned with 3rd word of dividend
+	addx.l		%d2, %d5
+	mov.l		%d7, %d3
+	clr.w		%d3			# %d3 now ms word of divisor
+	swap		%d3			# aligned with 2nd word of dividend
+	add.l		%d3, %d5
+ldd2nd:
+	tst.b		DDSECOND(%a6)	# both q words done?
+	bne.b		lddremain
+# first quotient digit now correct. store digit and shift the
+# (subtracted) dividend
+	mov.w		%d1, DDQUOTIENT(%a6)
+	clr.l		%d1
+	swap		%d5
+	swap		%d6
+	mov.w		%d6, %d5
+	clr.w		%d6
+	st		DDSECOND(%a6)		# second digit
+	bra.w		lddnormalized
+lddremain:
+# add 2nd word to quotient, get the remainder.
+	mov.w		%d1, DDQUOTIENT+2(%a6)
+# shift down one word/digit to renormalize remainder.
+	mov.w		%d5, %d6
+	swap		%d6
+	swap		%d5
+	mov.l		DDNORMAL(%a6), %d7	# get norm shift count
+	beq.b		lddrn
+	subq.l		&0x1, %d7		# set for loop count
+lddnlp:
+	lsr.l		&0x1, %d5		# shift into %d6
+	roxr.l		&0x1, %d6
+	dbf		%d7, lddnlp
+lddrn:
+	mov.l		%d6, %d5		# remainder
+	mov.l		DDQUOTIENT(%a6), %d6	# quotient
+
+	rts
+ldmm2:
+# factors for the 32X32->64 multiplication are in %d5 and %d6.
+# returns 64 bit result in %d5 (hi) %d6(lo).
+# destroys %d2,%d3,%d4.
+
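+# this is the schoolbook decomposition
+#	(a*2^16 + b)*(c*2^16 + d) = a*c*2^32 + (a*d + b*c)*2^16 + b*d
+# built from four 16x16->32 mulu.w products.
+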
+# multiply hi,lo words of each factor to get 4 intermediate products
+	mov.l		%d6, %d2
+	mov.l		%d6, %d3
+	mov.l		%d5, %d4
+	swap		%d3
+	swap		%d4
+	mulu.w		%d5, %d6		# %d6 <- lsw*lsw
+	mulu.w		%d3, %d5		# %d5 <- msw-dest*lsw-source
+	mulu.w		%d4, %d2		# %d2 <- msw-source*lsw-dest
+	mulu.w		%d4, %d3		# %d3 <- msw*msw
+# now use swap and addx to consolidate to two longwords
+	clr.l		%d4
+	swap		%d6
+	add.w		%d5, %d6		# add msw of l*l to lsw of m*l product
+	addx.w		%d4, %d3		# add any carry to m*m product
+	add.w		%d2, %d6		# add in lsw of other m*l product
+	addx.w		%d4, %d3		# add any carry to m*m product
+	swap		%d6			# %d6 is low 32 bits of final product
+	clr.w		%d5
+	clr.w		%d2			# lsw of two mixed products used,
+	swap		%d5			# now use msws of longwords
+	swap		%d2
+	add.l		%d2, %d5
+	add.l		%d3, %d5	# %d5 now ms 32 bits of final product
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_060LSP__imulu64_(): Emulate 64-bit unsigned mul instruction	#
+#	_060LSP__imuls64_(): Emulate 64-bit signed mul instruction.	#
+#									#
+#	This is the library version which is accessed as a subroutine	#
+#	and therefore does not work exactly like the 680X0 mul{s,u}.l	#
+#	64-bit multiply instruction.					#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	0x4(sp) = multiplier						#
+#	0x8(sp) = multiplicand						#
+#	0xc(sp) = pointer to location to place 64-bit result		#
+#									#
+# OUTPUT **************************************************************	#
+#	0xc(sp) = points to location of 64-bit result			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Perform the multiply in pieces using 16x16->32 unsigned		#
+# multiplies and "add" instructions.					#
+#	Set the condition codes as appropriate before performing an	#
+# "rts".								#
+#									#
+#########################################################################
+
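+# an illustrative call sequence (caller side), assuming a 2-longword
+# result buffer at (%a0):
+#
+#	pea.l	(%a0)			# push result pointer
+#	mov.l	%d1,-(%sp)		# push multiplicand
+#	mov.l	%d0,-(%sp)		# push multiplier
+#	bsr.l	_060LSP__imulu64_
+#	add.l	&0xc,%sp		# pop the three arguments
+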
+set MUL64_CC, -4
+
+	global		_060LSP__imulu64_
+_060LSP__imulu64_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,MUL64_CC(%a6)	# save incoming ccodes
+
+	mov.l		0x8(%a6),%d0		# store multiplier in d0
+	beq.w		mulu64_zero		# handle zero separately
+
+	mov.l		0xc(%a6),%d1		# get multiplicand in d1
+	beq.w		mulu64_zero		# handle zero separately
+
+#########################################################################
+#	63			   32				0	#
+#	----------------------------					#
+#	| hi(mplier) * hi(mplicand)|					#
+#	----------------------------					#
+#		     -----------------------------			#
+#		     | hi(mplier) * lo(mplicand) |			#
+#		     -----------------------------			#
+#		     -----------------------------			#
+#		     | lo(mplier) * hi(mplicand) |			#
+#		     -----------------------------			#
+#	  |			   -----------------------------	#
+#	--|--			   | lo(mplier) * lo(mplicand) |	#
+#	  |			   -----------------------------	#
+#	========================================================	#
+#	--------------------------------------------------------	#
+#	|	hi(result)	   |	    lo(result)         |	#
+#	--------------------------------------------------------	#
+#########################################################################
+mulu64_alg:
+# load temp registers with operands
+	mov.l		%d0,%d2			# mr in d2
+	mov.l		%d0,%d3			# mr in d3
+	mov.l		%d1,%d4			# md in d4
+	swap		%d3			# hi(mr) in lo d3
+	swap		%d4			# hi(md) in lo d4
+
+# complete necessary multiplies:
+	mulu.w		%d1,%d0			# [1] lo(mr) * lo(md)
+	mulu.w		%d3,%d1			# [2] hi(mr) * lo(md)
+	mulu.w		%d4,%d2			# [3] lo(mr) * hi(md)
+	mulu.w		%d4,%d3			# [4] hi(mr) * hi(md)
+
+# add lo portions of [2],[3] to hi portion of [1].
+# add carries produced from these adds to [4].
+# lo([1]) is the final lo 16 bits of the result.
+	clr.l		%d4			# load d4 w/ zero value
+	swap		%d0			# hi([1]) <==> lo([1])
+	add.w		%d1,%d0			# hi([1]) + lo([2])
+	addx.l		%d4,%d3			#    [4]  + carry
+	add.w		%d2,%d0			# hi([1]) + lo([3])
+	addx.l		%d4,%d3			#    [4]  + carry
+	swap		%d0			# lo([1]) <==> hi([1])
+
+# lo portions of [2],[3] have been added in to final result.
+# now, clear lo, put hi in lo reg, and add to [4]
+	clr.w		%d1			# clear lo([2])
+	clr.w		%d2			# clear hi([3])
+	swap		%d1			# hi([2]) in lo d1
+	swap		%d2			# hi([3]) in lo d2
+	add.l		%d2,%d1			#    [4]  + hi([2])
+	add.l		%d3,%d1			#    [4]  + hi([3])
+
+# now, grab the condition codes. the only one that can be set is 'N'.
+# 'N' CAN be set, even though the operation is unsigned, if bit 63 is set.
+	mov.w		MUL64_CC(%a6),%d4
+	andi.b		&0x10,%d4		# keep old 'X' bit
+	tst.l		%d1			# may set 'N' bit
+	bpl.b		mulu64_ddone
+	ori.b		&0x8,%d4		# set 'N' bit
+mulu64_ddone:
+	mov.w		%d4,%cc
+
+# here, the result is in d1 and d0. the current strategy is to save
+# the values at the location whose pointer was passed on the stack
+# (now at 0x10(%a6)).
+# use movm here to not disturb the condition codes.
+mulu64_end:
+	exg		%d1,%d0
+	movm.l		&0x0003,([0x10,%a6])		# save result
+
+# EPILOGUE BEGIN ########################################################
+#	fmovm.l		(%sp)+,&0x0		# restore no fpregs
+	movm.l		(%sp)+,&0x001c		# restore d2-d4
+	unlk		%a6
+# EPILOGUE END ##########################################################
+
+	rts
+
+# one or both of the operands is zero so the result is also zero.
+# store the zero result through the result pointer and set the 'Z' ccode bit.
+mulu64_zero:
+	clr.l		%d0
+	clr.l		%d1
+
+	mov.w		MUL64_CC(%a6),%d4
+	andi.b		&0x10,%d4
+	ori.b		&0x4,%d4
+	mov.w		%d4,%cc			# set 'Z' ccode bit
+
+	bra.b		mulu64_end
+
+##########
+# muls.l #
+##########
+	global		_060LSP__imuls64_
+_060LSP__imuls64_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,MUL64_CC(%a6)	# save incoming ccodes
+
+	mov.l		0x8(%a6),%d0		# store multiplier in d0
+	beq.b		mulu64_zero		# handle zero separately
+
+	mov.l		0xc(%a6),%d1		# get multiplicand in d1
+	beq.b		mulu64_zero		# handle zero separately
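+# (both zero checks branch backward to the shared mulu64_zero exit;
+# %d5 is still unmodified here, so the shorter d2-d4 restore in
+# mulu64_end is safe and unlk rebalances the stack pointer.)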
+
+	clr.b		%d5			# clear sign tag
+	tst.l		%d0			# is multiplier negative?
+	bge.b		muls64_chk_md_sgn	# no
+	neg.l		%d0			# make multiplier positive
+
+	ori.b		&0x1,%d5		# save multiplier sgn
+
+# the result sign is the exclusive or of the operand sign bits.
+muls64_chk_md_sgn:
+	tst.l		%d1			# is multiplicand negative?
+	bge.b		muls64_alg		# no
+	neg.l		%d1			# make multiplicand positive
+
+	eori.b		&0x1,%d5		# calculate correct sign
+
+#########################################################################
+#	63			   32				0	#
+#	----------------------------					#
+#	| hi(mplier) * hi(mplicand)|					#
+#	----------------------------					#
+#		     -----------------------------			#
+#		     | hi(mplier) * lo(mplicand) |			#
+#		     -----------------------------			#
+#		     -----------------------------			#
+#		     | lo(mplier) * hi(mplicand) |			#
+#		     -----------------------------			#
+#	  |			   -----------------------------	#
+#	--|--			   | lo(mplier) * lo(mplicand) |	#
+#	  |			   -----------------------------	#
+#	========================================================	#
+#	--------------------------------------------------------	#
+#	|	hi(result)	   |	    lo(result)         |	#
+#	--------------------------------------------------------	#
+#########################################################################
+muls64_alg:
+# load temp registers with operands
+	mov.l		%d0,%d2			# mr in d2
+	mov.l		%d0,%d3			# mr in d3
+	mov.l		%d1,%d4			# md in d4
+	swap		%d3			# hi(mr) in lo d3
+	swap		%d4			# hi(md) in lo d4
+
+# complete necessary multiplies:
+	mulu.w		%d1,%d0			# [1] lo(mr) * lo(md)
+	mulu.w		%d3,%d1			# [2] hi(mr) * lo(md)
+	mulu.w		%d4,%d2			# [3] lo(mr) * hi(md)
+	mulu.w		%d4,%d3			# [4] hi(mr) * hi(md)
+
+# add lo portions of [2],[3] to hi portion of [1].
+# add carries produced from these adds to [4].
+# lo([1]) is the final lo 16 bits of the result.
+	clr.l		%d4			# load d4 w/ zero value
+	swap		%d0			# hi([1]) <==> lo([1])
+	add.w		%d1,%d0			# hi([1]) + lo([2])
+	addx.l		%d4,%d3			#    [4]  + carry
+	add.w		%d2,%d0			# hi([1]) + lo([3])
+	addx.l		%d4,%d3			#    [4]  + carry
+	swap		%d0			# lo([1]) <==> hi([1])
+
+# lo portions of [2],[3] have been added in to final result.
+# now, clear lo, put hi in lo reg, and add to [4]
+	clr.w		%d1			# clear lo([2])
+	clr.w		%d2			# clear hi([3])
+	swap		%d1			# hi([2]) in lo d1
+	swap		%d2			# hi([3]) in lo d2
+	add.l		%d2,%d1			#    [4]  + hi([2])
+	add.l		%d3,%d1			#    [4]  + hi([3])
+
+	tst.b		%d5			# should result be signed?
+	beq.b		muls64_done		# no
+
+# result should be a signed negative number.
+# compute 2's complement of the unsigned number:
+#   -negate all bits and add 1
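+#   e.g. 1 * -1: unsigned product 0x00000000_00000001 inverts to
+#   0xffffffff_fffffffe, then +1 gives 0xffffffff_ffffffff = -1.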
+muls64_neg:
+	not.l		%d0			# negate lo(result) bits
+	not.l		%d1			# negate hi(result) bits
+	addq.l		&1,%d0			# add 1 to lo(result)
+	addx.l		%d4,%d1			# add carry to hi(result)
+
+muls64_done:
+	mov.w		MUL64_CC(%a6),%d4
+	andi.b		&0x10,%d4		# keep old 'X' bit
+	tst.l		%d1			# may set 'N' bit
+	bpl.b		muls64_ddone
+	ori.b		&0x8,%d4		# set 'N' bit
+muls64_ddone:
+	mov.w		%d4,%cc
+
+# here, the result is in d1 and d0. the current strategy is to save
+# the values at the location whose pointer was passed on the stack
+# (now at 0x10(%a6)).
+# use movm here to not disturb the condition codes.
+muls64_end:
+	exg		%d1,%d0
+	movm.l		&0x0003,([0x10,%a6])	# save result via stacked ptr
+
+# EPILOGUE BEGIN ########################################################
+#	fmovm.l		(%sp)+,&0x0		# restore no fpregs
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	unlk		%a6
+# EPILOGUE END ##########################################################
+
+	rts
+
+# one or both of the operands is zero so the result is also zero.
+# store the zero result through the result pointer and set the 'Z' ccode bit.
+muls64_zero:
+	clr.l		%d0
+	clr.l		%d1
+
+	mov.w		MUL64_CC(%a6),%d4
+	andi.b		&0x10,%d4
+	ori.b		&0x4,%d4
+	mov.w		%d4,%cc			# set 'Z' ccode bit
+
+	bra.b		muls64_end
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_060LSP__cmp2_Ab_(): Emulate "cmp2.b An,<ea>".			#
+#	_060LSP__cmp2_Aw_(): Emulate "cmp2.w An,<ea>".			#
+#	_060LSP__cmp2_Al_(): Emulate "cmp2.l An,<ea>".			#
+#	_060LSP__cmp2_Db_(): Emulate "cmp2.b Dn,<ea>".			#
+#	_060LSP__cmp2_Dw_(): Emulate "cmp2.w Dn,<ea>".			#
+#	_060LSP__cmp2_Dl_(): Emulate "cmp2.l Dn,<ea>".			#
+#									#
+#	This is the library version which is accessed as a subroutine	#
+#	and therefore does not work exactly like the 680X0 "cmp2"	#
+#	instruction.							#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	0x4(sp) = Rn							#
+#	0x8(sp) = pointer to boundary pair				#
+#									#
+# OUTPUT **************************************************************	#
+#	cc = condition codes are set correctly				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In the interest of simplicity, all operands are converted to	#
+# longword size whether the operation is byte, word, or long. The	#
+# bounds are sign extended accordingly. If Rn is a data register, Rn is #
+# also sign extended. If Rn is an address register, it need not be sign #
+# extended since the full register is always used.			#
+#	The condition codes are set correctly before the final "rts".	#
+#									#
+#########################################################################
+
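+# an illustrative call sequence (caller side), assuming a signed
+# longword bounds pair {lo,hi} at (%a0):
+#
+#	pea.l	(%a0)			# push pointer to bounds pair
+#	mov.l	%d0,-(%sp)		# push Rn value
+#	bsr.l	_060LSP__cmp2_Dl_
+#	addq.l	&0x8,%sp		# pop arguments; ccodes now set
+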
+set	CMP2_CC,	-4
+
+	global		_060LSP__cmp2_Ab_
+_060LSP__cmp2_Ab_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,CMP2_CC(%a6)
+	mov.l		0x8(%a6), %d2		# get regval
+
+	mov.b		([0xc,%a6],0x0),%d0
+	mov.b		([0xc,%a6],0x1),%d1
+
+	extb.l		%d0			# sign extend lo bnd
+	extb.l		%d1			# sign extend hi bnd
+	bra.w		l_cmp2_cmp		# go do the compare emulation
+
+	global		_060LSP__cmp2_Aw_
+_060LSP__cmp2_Aw_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,CMP2_CC(%a6)
+	mov.l		0x8(%a6), %d2		# get regval
+
+	mov.w		([0xc,%a6],0x0),%d0
+	mov.w		([0xc,%a6],0x2),%d1
+
+	ext.l		%d0			# sign extend lo bnd
+	ext.l		%d1			# sign extend hi bnd
+	bra.w		l_cmp2_cmp		# go do the compare emulation
+
+	global		_060LSP__cmp2_Al_
+_060LSP__cmp2_Al_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,CMP2_CC(%a6)
+	mov.l		0x8(%a6), %d2		# get regval
+
+	mov.l		([0xc,%a6],0x0),%d0
+	mov.l		([0xc,%a6],0x4),%d1
+	bra.w		l_cmp2_cmp		# go do the compare emulation
+
+	global		_060LSP__cmp2_Db_
+_060LSP__cmp2_Db_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,CMP2_CC(%a6)
+	mov.l		0x8(%a6), %d2		# get regval
+
+	mov.b		([0xc,%a6],0x0),%d0
+	mov.b		([0xc,%a6],0x1),%d1
+
+	extb.l		%d0			# sign extend lo bnd
+	extb.l		%d1			# sign extend hi bnd
+
+# operation is a data register compare.
+# sign extend byte to long so we can do simple longword compares.
+	extb.l		%d2			# sign extend data byte
+	bra.w		l_cmp2_cmp		# go do the compare emulation
+
+	global		_060LSP__cmp2_Dw_
+_060LSP__cmp2_Dw_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,CMP2_CC(%a6)
+	mov.l		0x8(%a6), %d2		# get regval
+
+	mov.w		([0xc,%a6],0x0),%d0
+	mov.w		([0xc,%a6],0x2),%d1
+
+	ext.l		%d0			# sign extend lo bnd
+	ext.l		%d1			# sign extend hi bnd
+
+# operation is a data register compare.
+# sign extend word to long so we can do simple longword compares.
+	ext.l		%d2			# sign extend data word
+	bra.w		l_cmp2_cmp		# go emulate compare
+
+	global		_060LSP__cmp2_Dl_
+_060LSP__cmp2_Dl_:
+
+# PROLOGUE BEGIN ########################################################
+	link.w		%a6,&-4
+	movm.l		&0x3800,-(%sp)		# save d2-d4
+#	fmovm.l		&0x0,-(%sp)		# save no fpregs
+# PROLOGUE END ##########################################################
+
+	mov.w		%cc,CMP2_CC(%a6)
+	mov.l		0x8(%a6), %d2		# get regval
+
+	mov.l		([0xc,%a6],0x0),%d0
+	mov.l		([0xc,%a6],0x4),%d1
+
+#
+# To set the ccodes correctly:
+#	(1) save 'Z' bit from (Rn - lo)
+#	(2) save 'Z' and 'C' bits from ((hi - lo) - (Rn - lo))
+#	(3) keep 'X', 'N', and 'V' from before instruction
+#	(4) combine ccodes
+#
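+# the two subtractions implement the standard range-check identity:
+# lo <= Rn <= hi (in either signedness) exactly when
+# (unsigned)(Rn - lo) <= (unsigned)(hi - lo).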
+l_cmp2_cmp:
+	sub.l		%d0, %d2		# (Rn - lo)
+	mov.w		%cc, %d3		# fetch resulting ccodes
+	andi.b		&0x4, %d3		# keep 'Z' bit
+	sub.l		%d0, %d1		# (hi - lo)
+	cmp.l		%d1,%d2			# ((hi - lo) - (Rn - lo))
+
+	mov.w		%cc, %d4		# fetch resulting ccodes
+	or.b		%d4, %d3		# combine w/ earlier ccodes
+	andi.b		&0x5, %d3		# keep 'Z' and 'C'
+
+	mov.w		CMP2_CC(%a6), %d4	# fetch old ccodes
+	andi.b		&0x1a, %d4		# keep 'X','N','V' bits
+	or.b		%d3, %d4		# insert new ccodes
+	mov.w		%d4,%cc			# save new ccodes
+
+# EPILOGUE BEGIN ########################################################
+#	fmovm.l		(%sp)+,&0x0		# restore no fpregs
+	movm.l		(%sp)+,&0x001c		# restore d2-d4
+	unlk		%a6
+# EPILOGUE END ##########################################################
+
+	rts
diff --git a/arch/m68k/ifpsp060/src/isp.S b/arch/m68k/ifpsp060/src/isp.S
new file mode 100644
index 0000000..b269091
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/isp.S
@@ -0,0 +1,4299 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# ireal.s:
+#	This file is appended to the top of the 060ISP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located
+# after _060ISP_TABLE.
+#	Also, subroutine stubs exist in this file (_isp_done for
+# example) that are referenced by the ISP package itself in order
+# to call a given routine. The stub routine actually performs the
+# callout. The ISP code does a "bsr" to the stub routine. This
+# extra layer of hierarchy adds a slight performance penalty but
+# it makes the ISP code easier to read and more maintainable.
+#
+
+set	_off_chk,	0x00
+set	_off_divbyzero,	0x04
+set	_off_trace,	0x08
+set	_off_access,	0x0c
+set	_off_done,	0x10
+
+set	_off_cas,	0x14
+set	_off_cas2,	0x18
+set	_off_lock,	0x1c
+set	_off_unlock,	0x20
+
+set	_off_imr,	0x40
+set	_off_dmr,	0x44
+set	_off_dmw,	0x48
+set	_off_irw,	0x4c
+set	_off_irl,	0x50
+set	_off_drb,	0x54
+set	_off_drw,	0x58
+set	_off_drl,	0x5c
+set	_off_dwb,	0x60
+set	_off_dww,	0x64
+set	_off_dwl,	0x68
+
+_060ISP_TABLE:
+
+# Here's the table of ENTRY POINTS for those linking the package.
+	bra.l		_isp_unimp
+	short		0x0000
+
+	bra.l		_isp_cas
+	short		0x0000
+
+	bra.l		_isp_cas2
+	short		0x0000
+
+	bra.l		_isp_cas_finish
+	short		0x0000
+
+	bra.l		_isp_cas2_finish
+	short		0x0000
+
+	bra.l		_isp_cas_inrange
+	short		0x0000
+
+	bra.l		_isp_cas_terminate
+	short		0x0000
+
+	bra.l		_isp_cas_restart
+	short		0x0000
+
+	space		64
+
+#############################################################
+
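+# each stub below preserves %d0 while dispatching through the callout
+# area 0x80 bytes before _060ISP_TABLE: the longword in the _off_*
+# slot is an offset from that area's base. the stub pushes %d0,
+# pushes the computed target, reloads %d0 from the saved copy, and
+# "rtd &0x4" pops the target into the pc while discarding the copy.
+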
+	global		_real_chk
+_real_chk:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_chk,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_divbyzero
+_real_divbyzero:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_divbyzero,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_trace
+_real_trace:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_trace,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_access
+_real_access:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_access,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_isp_done
+_isp_done:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_done,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#######################################
+
+	global		_real_cas
+_real_cas:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_cas,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_cas2
+_real_cas2:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_cas2,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_lock_page
+_real_lock_page:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_lock,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_unlock_page
+_real_unlock_page:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_unlock,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#######################################
+
+	global		_imem_read
+_imem_read:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_imr,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read
+_dmem_read:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_dmr,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write
+_dmem_write:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_dmw,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_imem_read_word
+_imem_read_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_irw,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_imem_read_long
+_imem_read_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_irl,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_byte
+_dmem_read_byte:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_drb,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_word
+_dmem_read_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_drw,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_long
+_dmem_read_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_drl,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_byte
+_dmem_write_byte:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_dwb,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_word
+_dmem_write_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_dww,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_long
+_dmem_write_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060ISP_TABLE-0x80+_off_dwl,%pc),%d0
+	pea.l		(_060ISP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#
+# This file contains a set of define statements for constants
+# in order to promote readability within the core code itself.
+#
+
+set LOCAL_SIZE,		96			# stack frame size(bytes)
+set LV,			-LOCAL_SIZE		# stack offset
+
+set EXC_ISR,		0x4			# stack status register
+set EXC_IPC,		0x6			# stack pc
+set EXC_IVOFF,		0xa			# stacked vector offset
+
+set EXC_AREGS,		LV+64			# offset of all address regs
+set EXC_DREGS,		LV+32			# offset of all data regs
+
+set EXC_A7,		EXC_AREGS+(7*4)		# offset of a7
+set EXC_A6,		EXC_AREGS+(6*4)		# offset of a6
+set EXC_A5,		EXC_AREGS+(5*4)		# offset of a5
+set EXC_A4,		EXC_AREGS+(4*4)		# offset of a4
+set EXC_A3,		EXC_AREGS+(3*4)		# offset of a3
+set EXC_A2,		EXC_AREGS+(2*4)		# offset of a2
+set EXC_A1,		EXC_AREGS+(1*4)		# offset of a1
+set EXC_A0,		EXC_AREGS+(0*4)		# offset of a0
+set EXC_D7,		EXC_DREGS+(7*4)		# offset of d7
+set EXC_D6,		EXC_DREGS+(6*4)		# offset of d6
+set EXC_D5,		EXC_DREGS+(5*4)		# offset of d5
+set EXC_D4,		EXC_DREGS+(4*4)		# offset of d4
+set EXC_D3,		EXC_DREGS+(3*4)		# offset of d3
+set EXC_D2,		EXC_DREGS+(2*4)		# offset of d2
+set EXC_D1,		EXC_DREGS+(1*4)		# offset of d1
+set EXC_D0,		EXC_DREGS+(0*4)		# offset of d0
+
+set EXC_TEMP,		LV+16			# offset of temp stack space
+
+set EXC_SAVVAL,		LV+12			# offset of old areg value
+set EXC_SAVREG,		LV+11			# offset of old areg index
+
+set SPCOND_FLG,		LV+10			# offset of spc condition flg
+
+set EXC_CC,		LV+8			# offset of cc register
+set EXC_EXTWPTR,	LV+4			# offset of current PC
+set EXC_EXTWORD,	LV+2			# offset of current ext opword
+set EXC_OPWORD,		LV+0			# offset of current opword
+
+###########################
+# SPecial CONDition FLaGs #
+###########################
+set mia7_flg,		0x04			# (a7)+ flag
+set mda7_flg,		0x08			# -(a7) flag
+set ichk_flg,		0x10			# chk exception flag
+set idbyz_flg,		0x20			# divbyzero flag
+set restore_flg,	0x40			# restore (an)+/-(an) flag
+set immed_flg,		0x80			# immediate data flag
+
+set mia7_bit,		0x2			# (a7)+ bit
+set mda7_bit,		0x3			# -(a7) bit
+set ichk_bit,		0x4			# chk exception bit
+set idbyz_bit,		0x5			# divbyzero bit
+set restore_bit,	0x6			# restore -(a7)+ bit
+set immed_bit,		0x7			# immediate data bit
+
+#########
+# Misc. #
+#########
+set BYTE,		1			# len(byte) == 1 byte
+set WORD,		2			# len(word) == 2 bytes
+set LONG,		4			# len(longword) == 4 bytes
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_isp_unimp(): 060ISP entry point for Unimplemented Instruction	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	"Unimplemented Integer Instruction" exception in an operating	#
+#	system.								#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_{word,long}() - read instruction word/longword	#
+#	_mul64() - emulate 64-bit multiply				#
+#	_div64() - emulate 64-bit divide				#
+#	_moveperipheral() - emulate "movep"				#
+#	_compandset() - emulate misaligned "cas"			#
+#	_compandset2() - emulate "cas2"					#
+#	_chk2_cmp2() - emulate "cmp2" and "chk2"			#
+#	_isp_done() - "callout" for normal final exit			#
+#	_real_trace() - "callout" for Trace exception			#
+#	_real_chk() - "callout" for Chk exception			#
+#	_real_divbyzero() - "callout" for DZ exception			#
+#	_real_access() - "callout" for access error exception		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the Unimp Int Instr stack frame	#
+#									#
+# OUTPUT **************************************************************	#
+#	If Trace exception:						#
+#	- The system stack changed to contain Trace exc stack frame	#
+#	If Chk exception:						#
+#	- The system stack changed to contain Chk exc stack frame	#
+#	If DZ exception:						#
+#	- The system stack changed to contain DZ exc stack frame	#
+#	If access error exception:					#
+#	- The system stack changed to contain access err exc stk frame	#
+#	Else:								#
+#	- Results saved as appropriate					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This handler fetches the first instruction longword from	#
+# memory and decodes it to determine which of the unimplemented		#
+# integer instructions caused this exception. This handler then calls	#
+# one of _mul64(), _div64(), _moveperipheral(), _compandset(),		#
+# _compandset2(), or _chk2_cmp2() as appropriate.			#
+#	Some of these instructions, by their nature, may produce other	#
+# types of exceptions. "div" can produce a divide-by-zero exception,	#
+# and "chk2" can cause a "Chk" exception. In both cases, the current	#
+# exception stack frame must be converted to an exception stack frame	#
+# of the correct exception type and an exit must be made through	#
+# _real_divbyzero() or _real_chk() as appropriate. In addition, all	#
+# instructions may be executing while Trace is enabled. If so, then	#
+# a Trace exception stack frame must be created and an exit made	#
+# through _real_trace().						#
+#	Meanwhile, if any read or write to memory using the		#
+# _mem_{read,write}() "callout"s returns a failing value, then an	#
+# access error frame must be created and an exit made through		#
+# _real_access().							#
+#	If none of these occur, then a normal exit is made through	#
+# _isp_done().								#
+#									#
+#	This handler, upon entry, saves almost all user-visible		#
+# address and data registers to the stack. Although this may seem to	#
+# cause excess memory traffic, it was found that due to having to	#
+# access these register files for things like data retrieval and <ea>	#
+# calculations, it was more efficient to have them on the stack where	#
+# they could be accessed by indexing rather than to make subroutine	#
+# calls to retrieve a register of a particular index.			#
+#									#
+#########################################################################
+
+	global		_isp_unimp
+_isp_unimp:
+	link.w		%a6,&-LOCAL_SIZE	# create room for stack frame
+
+	movm.l		&0x3fff,EXC_DREGS(%a6)	# store d0-d7/a0-a5
+	mov.l		(%a6),EXC_A6(%a6)	# store a6
+
+	btst		&0x5,EXC_ISR(%a6)	# from s or u mode?
+	bne.b		uieh_s			# supervisor mode
+uieh_u:
+	mov.l		%usp,%a0		# fetch user stack pointer
+	mov.l		%a0,EXC_A7(%a6)		# store a7
+	bra.b		uieh_cont
+uieh_s:
+	lea		0xc(%a6),%a0
+	mov.l		%a0,EXC_A7(%a6)		# store corrected sp
+
+###############################################################################
+
+uieh_cont:
+	clr.b		SPCOND_FLG(%a6)		# clear "special case" flag
+
+	mov.w		EXC_ISR(%a6),EXC_CC(%a6) # store cc copy on stack
+	mov.l		EXC_IPC(%a6),EXC_EXTWPTR(%a6) # store extwptr on stack
+
+#
+# fetch the opword and first extension word pointed to by the stacked pc
+# and store them to the stack for now
+#
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch opword & extword
+	mov.l		%d0,EXC_OPWORD(%a6)	# store opword & extword on stack
+
+
+#########################################################################
+# muls.l	0100 1100 00 |<ea>|	0*** 1100 0000 0***		#
+# mulu.l	0100 1100 00 |<ea>|	0*** 0100 0000 0***		#
+#									#
+# divs.l	0100 1100 01 |<ea>|	0*** 1100 0000 0***		#
+# divu.l	0100 1100 01 |<ea>|	0*** 0100 0000 0***		#
+#									#
+# movep.w m2r	0000 ***1 00 001***	| <displacement>  |		#
+# movep.l m2r	0000 ***1 01 001***	| <displacement>  |		#
+# movep.w r2m	0000 ***1 10 001***	| <displacement>  |		#
+# movep.l r2m	0000 ***1 11 001***	| <displacement>  |		#
+#									#
+# cas.w		0000 1100 11 |<ea>|	0000 000* **00 0***		#
+# cas.l		0000 1110 11 |<ea>|	0000 000* **00 0***		#
+#									#
+# cas2.w	0000 1100 11 111100	**** 000* **00 0***		#
+#					**** 000* **00 0***		#
+# cas2.l	0000 1110 11 111100	**** 000* **00 0***		#
+#					**** 000* **00 0***		#
+#									#
+# chk2.b	0000 0000 11 |<ea>|	**** 1000 0000 0000		#
+# chk2.w	0000 0010 11 |<ea>|	**** 1000 0000 0000		#
+# chk2.l	0000 0100 11 |<ea>|	**** 1000 0000 0000		#
+#									#
+# cmp2.b	0000 0000 11 |<ea>|	**** 0000 0000 0000		#
+# cmp2.w	0000 0010 11 |<ea>|	**** 0000 0000 0000		#
+# cmp2.l	0000 0100 11 |<ea>|	**** 0000 0000 0000		#
+#########################################################################
+
+#
+# using bit 14 of the operation word, separate into 2 groups:
+# (group1) mul64, div64
+# (group2) movep, chk2, cmp2, cas2, cas
+#
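+# (the opword sits in the upper word of %d0, so opword bit n is
+# tested as longword bit n+16: the btst constants 0x1e, 0x16, 0x18,
+# and 0x1b below select opword bits 14, 6, 8, and 11.)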
+	btst		&0x1e,%d0		# group1 or group2
+	beq.b		uieh_group2		# go handle group2
+
+#
+# now, w/ group1, make mul64's decode the fastest since it will
+# most likely be used the most.
+#
+uieh_group1:
+	btst		&0x16,%d0		# test for div64
+	bne.b		uieh_div64		# go handle div64
+
+uieh_mul64:
+# mul64() may use ()+ addressing and may, therefore, alter a7
+
+	bsr.l		_mul64			# _mul64()
+
+	btst		&0x5,EXC_ISR(%a6)	# supervisor mode?
+	beq.w		uieh_done
+	btst		&mia7_bit,SPCOND_FLG(%a6) # was a7 changed?
+	beq.w		uieh_done		# no
+	btst		&0x7,EXC_ISR(%a6)	# is trace enabled?
+	bne.w		uieh_trace_a7		# yes
+	bra.w		uieh_a7			# no
+
+uieh_div64:
+# div64() may use ()+ addressing and may, therefore, alter a7.
+# div64() may take a divide by zero exception.
+
+	bsr.l		_div64			# _div64()
+
+# here, we sort out all of the special cases that may have happened.
+	btst		&mia7_bit,SPCOND_FLG(%a6) # was a7 changed?
+	bne.b		uieh_div64_a7		# yes
+uieh_div64_dbyz:
+	btst		&idbyz_bit,SPCOND_FLG(%a6) # did divide-by-zero occur?
+	bne.w		uieh_divbyzero		# yes
+	bra.w		uieh_done		# no
+uieh_div64_a7:
+	btst		&0x5,EXC_ISR(%a6)	# supervisor mode?
+	beq.b		uieh_div64_dbyz		# no
+# here, a7 has been incremented by 4 bytes in supervisor mode. we still
+# may have the following 3 cases:
+#	(i)	(a7)+
+#	(ii)	(a7)+; trace
+#	(iii)	(a7)+; divide-by-zero
+#
+	btst		&idbyz_bit,SPCOND_FLG(%a6) # did divide-by-zero occur?
+	bne.w		uieh_divbyzero_a7	# yes
+	tst.b		EXC_ISR(%a6)		# no; is trace enabled?
+	bmi.w		uieh_trace_a7		# yes
+	bra.w		uieh_a7			# no
+
+#
+# now, w/ group2, make movep's decode the fastest since it will
+# most likely be used the most.
+#
+uieh_group2:
+	btst		&0x18,%d0		# test for not movep
+	beq.b		uieh_not_movep
+
+
+	bsr.l		_moveperipheral		# _movep()
+	bra.w		uieh_done
+
+uieh_not_movep:
+	btst		&0x1b,%d0		# test for chk2,cmp2
+	beq.b		uieh_chk2cmp2		# go handle chk2,cmp2
+
+	swap		%d0			# put opword in lo word
+	cmpi.b		%d0,&0xfc		# test for cas2
+	beq.b		uieh_cas2		# go handle cas2
+
+uieh_cas:
+
+	bsr.l		_compandset		# _cas()
+
+# the cases of "cas Dc,Du,(a7)+" and "cas Dc,Du,-(a7)" used from supervisor
+# mode are simply not considered valid and therefore are not handled.
+
+	bra.w		uieh_done
+
+uieh_cas2:
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# read extension word
+
+	tst.l		%d1			# ifetch error?
+	bne.w		isp_iacc		# yes
+
+	bsr.l		_compandset2		# _cas2()
+	bra.w		uieh_done
+
+uieh_chk2cmp2:
+# chk2 may take a chk exception
+
+	bsr.l		_chk2_cmp2		# _chk2_cmp2()
+
+# here we check to see if a chk trap should be taken
+	cmpi.b		SPCOND_FLG(%a6),&ichk_flg
+	bne.w		uieh_done
+	bra.b		uieh_chk_trap
+
+###########################################################################
+
+#
+# the required emulation has been completed. now, clean up the necessary stack
+# info and prepare for rte
+#
+uieh_done:
+	mov.b		EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+
+# if exception occurred in user mode, then we have to restore a7 in case it
+# changed. we don't have to update a7 for supervisor mode because that case
+# doesn't flow through here.
+	btst		&0x5,EXC_ISR(%a6)	# user or supervisor?
+	bne.b		uieh_finish		# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# fetch user stack pointer
+	mov.l		%a0,%usp		# restore it
+
+uieh_finish:
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+
+	btst		&0x7,EXC_ISR(%a6)	# is trace mode on?
+	bne.b		uieh_trace		# yes;go handle trace mode
+
+	mov.l		EXC_EXTWPTR(%a6),EXC_IPC(%a6) # new pc on stack frame
+	mov.l		EXC_A6(%a6),(%a6)	# prepare new a6 for unlink
+	unlk		%a6			# unlink stack frame
+	bra.l		_isp_done
+
+#
+# The instruction that was just emulated was also being traced. The trace
+# trap for this instruction will be lost unless we jump to the trace handler.
+# So, here we create a Trace Exception format number two exception stack
+# frame from the Unimplemented Integer Instruction Exception stack frame
+# format number zero and jump to the user supplied hook "_real_trace()".
+#
+#		   UIEH FRAME		   TRACE FRAME
+#		*****************	*****************
+#		* 0x0 *  0x0f4	*	*    Current	*
+#		*****************	*      PC	*
+#		*    Current	*	*****************
+#		*      PC	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*      SR	*	*     Next	*
+#		*****************	*      PC	*
+#	      ->*     Old	*	*****************
+#  from link -->*      A6	*	*      SR	*
+#	        *****************	*****************
+#	       /*      A7	*	*      New	* <-- for final unlink
+#	      / *		*	*      A6	*
+# link frame <  *****************	*****************
+#	      \ ~		~	~		~
+#	       \*****************	*****************
+#
+uieh_trace:
+	mov.l		EXC_A6(%a6),-0x4(%a6)
+	mov.w		EXC_ISR(%a6),0x0(%a6)
+	mov.l		EXC_IPC(%a6),0x8(%a6)
+	mov.l		EXC_EXTWPTR(%a6),0x2(%a6)
+	mov.w		&0x2024,0x6(%a6)
+	sub.l		&0x4,%a6
+	unlk		%a6
+	bra.l		_real_trace
+
+#
+#	   UIEH FRAME		    CHK FRAME
+#	*****************	*****************
+#	* 0x0 *  0x0f4	*	*    Current	*
+#	*****************	*      PC	*
+#	*    Current	*	*****************
+#	*      PC	*	* 0x2 *  0x018	*
+#	*****************	*****************
+#	*      SR	*	*     Next	*
+#	*****************	*      PC	*
+#	    (4 words)		*****************
+#				*      SR	*
+#				*****************
+#				    (6 words)
+#
+# the chk2 instruction should take a chk trap. so, here we must create a
+# chk stack frame from an unimplemented integer instruction exception frame
+# and jump to the user supplied entry point "_real_chk()".
+#
+uieh_chk_trap:
+	mov.b		EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+
+	mov.w		EXC_ISR(%a6),(%a6)	# put new SR on stack
+	mov.l		EXC_IPC(%a6),0x8(%a6)	# put "Current PC" on stack
+	mov.l		EXC_EXTWPTR(%a6),0x2(%a6) # put "Next PC" on stack
+	mov.w		&0x2018,0x6(%a6)	# put Vector Offset on stack
+
+	mov.l		EXC_A6(%a6),%a6		# restore a6
+	add.l		&LOCAL_SIZE,%sp		# clear stack frame
+
+	bra.l		_real_chk
+
+#
+#	   UIEH FRAME		 DIVBYZERO FRAME
+#	*****************	*****************
+#	* 0x0 *  0x0f4	*	*    Current	*
+#	*****************	*      PC	*
+#	*    Current	*	*****************
+#	*      PC	*	* 0x2 *  0x014	*
+#	*****************	*****************
+#	*      SR	*	*     Next	*
+#	*****************	*      PC	*
+#	    (4 words)		*****************
+#				*      SR	*
+#				*****************
+#				    (6 words)
+#
+# the divide instruction should take an integer divide by zero trap. so, here
+# we must create a divbyzero stack frame from an unimplemented integer
+# instruction exception frame and jump to the user supplied entry point
+# "_real_divbyzero()".
+#
+uieh_divbyzero:
+	mov.b		EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+
+	mov.w		EXC_ISR(%a6),(%a6)	# put new SR on stack
+	mov.l		EXC_IPC(%a6),0x8(%a6)	# put "Current PC" on stack
+	mov.l		EXC_EXTWPTR(%a6),0x2(%a6) # put "Next PC" on stack
+	mov.w		&0x2014,0x6(%a6)	# put Vector Offset on stack
+
+	mov.l		EXC_A6(%a6),%a6		# restore a6
+	add.l		&LOCAL_SIZE,%sp		# clear stack frame
+
+	bra.l		_real_divbyzero
+
+#
+#				 DIVBYZERO FRAME
+#				*****************
+#				*    Current	*
+#	   UIEH FRAME		*      PC	*
+#	*****************	*****************
+#	* 0x0 *  0x0f4	*	* 0x2 * 0x014	*
+#	*****************	*****************
+#	*    Current	*	*     Next	*
+#	*      PC	*	*      PC	*
+#	*****************	*****************
+#	*      SR	*	*      SR	*
+#	*****************	*****************
+#	    (4 words)		    (6 words)
+#
+# the divide instruction should take an integer divide by zero trap. so, here
+# we must create a divbyzero stack frame from an unimplemented integer
+# instruction exception frame and jump to the user supplied entry point
+# "_real_divbyzero()".
+#
+# However, we must also deal with the fact that (a7)+ was used from supervisor
+# mode, thereby shifting the stack frame up 4 bytes.
+#
+uieh_divbyzero_a7:
+	mov.b		EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+
+	mov.l		EXC_IPC(%a6),0xc(%a6)	# put "Current PC" on stack
+	mov.w		&0x2014,0xa(%a6)	# put Vector Offset on stack
+	mov.l		EXC_EXTWPTR(%a6),0x6(%a6) # put "Next PC" on stack
+
+	mov.l		EXC_A6(%a6),%a6		# restore a6
+	add.l		&4+LOCAL_SIZE,%sp	# clear stack frame
+
+	bra.l		_real_divbyzero
+
+#
+#				   TRACE FRAME
+#				*****************
+#				*    Current	*
+#	   UIEH FRAME		*      PC	*
+#	*****************	*****************
+#	* 0x0 *  0x0f4	*	* 0x2 * 0x024	*
+#	*****************	*****************
+#	*    Current	*	*     Next	*
+#	*      PC	*	*      PC	*
+#	*****************	*****************
+#	*      SR	*	*      SR	*
+#	*****************	*****************
+#	    (4 words)		    (6 words)
+#
+#
+# The instruction that was just emulated was also being traced. The trace
+# trap for this instruction will be lost unless we jump to the trace handler.
+# So, here we create a Trace Exception format number two exception stack
+# frame from the Unimplemented Integer Instruction Exception stack frame
+# format number zero and jump to the user supplied hook "_real_trace()".
+#
+# However, we must also deal with the fact that (a7)+ was used from supervisor
+# mode, thereby shifting the stack frame up 4 bytes.
+#
+uieh_trace_a7:
+	mov.b		EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+
+	mov.l		EXC_IPC(%a6),0xc(%a6)	# put "Current PC" on stack
+	mov.w		&0x2024,0xa(%a6)	# put Vector Offset on stack
+	mov.l		EXC_EXTWPTR(%a6),0x6(%a6) # put "Next PC" on stack
+
+	mov.l		EXC_A6(%a6),%a6		# restore a6
+	add.l		&4+LOCAL_SIZE,%sp	# clear stack frame
+
+	bra.l		_real_trace
+
+#
+#				   UIEH FRAME
+#				*****************
+#				* 0x0 * 0x0f4	*
+#	   UIEH FRAME		*****************
+#	*****************	*     Next	*
+#	* 0x0 *  0x0f4	*	*      PC	*
+#	*****************	*****************
+#	*    Current	*	*      SR	*
+#	*      PC	*	*****************
+#	*****************	    (4 words)
+#	*      SR	*
+#	*****************
+#	    (4 words)
+uieh_a7:
+	mov.b		EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+
+	mov.w		&0x00f4,0xe(%a6)	# put Vector Offset on stack
+	mov.l		EXC_EXTWPTR(%a6),0xa(%a6) # put "Next PC" on stack
+	mov.w		EXC_ISR(%a6),0x8(%a6)	# put SR on stack
+
+	mov.l		EXC_A6(%a6),%a6		# restore a6
+	add.l		&8+LOCAL_SIZE,%sp	# clear stack frame
+	bra.l		_isp_done
+
+##########
+
+# this is the exit point if a data read or write fails.
+# a0 = failing address
+# d0 = fslw
+isp_dacc:
+	mov.l		%a0,(%a6)		# save address
+	mov.l		%d0,-0x4(%a6)		# save partial fslw
+
+	lea		-64(%a6),%sp
+	movm.l		(%sp)+,&0x7fff		# restore d0-d7/a0-a6
+
+	mov.l		0xc(%sp),-(%sp)		# move voff,hi(pc)
+	mov.l		0x4(%sp),0x10(%sp)	# store fslw
+	mov.l		0xc(%sp),0x4(%sp)	# store sr,lo(pc)
+	mov.l		0x8(%sp),0xc(%sp)	# store address
+	mov.l		(%sp)+,0x4(%sp)		# store voff,hi(pc)
+	mov.w		&0x4008,0x6(%sp)	# store new voff
+
+	bra.b		isp_acc_exit
+
+# this is the exit point if an instruction word read fails.
+# FSLW:
+#	misaligned = true
+#	read = true
+#	size = word
+#	instruction = true
+#	software emulation error = true
+isp_iacc:
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore d0-d7/a0-a5
+	unlk		%a6			# unlink frame
+	sub.w		&0x8,%sp		# make room for acc frame
+	mov.l		0x8(%sp),(%sp)		# store sr,lo(pc)
+	mov.w		0xc(%sp),0x4(%sp)	# store hi(pc)
+	mov.w		&0x4008,0x6(%sp)	# store new voff
+	mov.l		0x2(%sp),0x8(%sp)	# store address (=pc)
+	mov.l		&0x09428001,0xc(%sp)	# store fslw
+
+isp_acc_exit:
+	btst		&0x5,(%sp)		# user or supervisor?
+	beq.b		isp_acc_exit2		# user
+	bset		&0x2,0xd(%sp)		# set supervisor TM bit
+isp_acc_exit2:
+	bra.l		_real_access
+
+# if the addressing mode was (an)+ or -(an), the address register must
+# be restored to its pre-exception value before entering _real_access.
+isp_restore:
+	cmpi.b		SPCOND_FLG(%a6),&restore_flg # do we need a restore?
+	bne.b		isp_restore_done	# no
+	clr.l		%d0
+	mov.b		EXC_SAVREG(%a6),%d0	# regno to restore
+	mov.l		EXC_SAVVAL(%a6),(EXC_AREGS,%a6,%d0.l*4) # restore value
+isp_restore_done:
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_calc_ea(): routine to calculate effective address		#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_word() - read instruction word			#
+#	_imem_read_long() - read instruction longword			#
+#	_dmem_read_long() - read data longword (for memory indirect)	#
+#	isp_iacc() - handle instruction access error exception		#
+#	isp_dacc() - handle data access error exception			#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = number of bytes related to effective address (w,l)		#
+#									#
+# OUTPUT **************************************************************	#
+#	If exiting through isp_dacc...					#
+#		a0 = failing address					#
+#		d0 = FSLW						#
+#	elsif exiting though isp_iacc...				#
+#		none							#
+#	else								#
+#		a0 = effective address					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The effective address type is decoded from the opword residing	#
+# on the stack. A jump table is used to vector to a routine for the	#
+# appropriate mode. Since none of the emulated integer instructions	#
+# uses byte-sized operands, only handle word and long operations.	#
+#									#
+#	Dn,An	- shouldn't enter here					#
+#	(An)	- fetch An value from stack				#
+#	-(An)	- fetch An value from stack; return decr value;		#
+#		  place decr value on stack; store old value in case of	#
+#		  future access error; if -(a7), set mda7_flg in	#
+#		  SPCOND_FLG						#
+#	(An)+	- fetch An value from stack; return value;		#
+#		  place incr value on stack; store old value in case of	#
+#		  future access error; if (a7)+, set mia7_flg in	#
+#		  SPCOND_FLG						#
+#	(d16,An) - fetch An value from stack; read d16 using		#
+#		  _imem_read_word(); fetch may fail -> branch to	#
+#		  isp_iacc()						#
+#	(xxx).w,(xxx).l - use _imem_read_{word,long}() to fetch		#
+#		  address; fetch may fail				#
+#	#<data> - return address of immediate value; set immed_flg	#
+#		  in SPCOND_FLG						#
+#	(d16,PC) - fetch stacked PC value; read d16 using		#
+#		  _imem_read_word(); fetch may fail -> branch to	#
+#		  isp_iacc()						#
+#	everything else - read needed displacements as appropriate w/	#
+#		  _imem_read_{word,long}(); read may fail; if memory	#
+#		  indirect, read indirect address using			#
+#		  _dmem_read_long() which may also fail			#
+#									#
+#########################################################################
+
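+# example: for "mulu.l (d16,%a3),%d0" the opword mode/reg fields are
+# 101/011, giving table index 0x2b and dispatching to
+# addr_ind_disp_a3 below.
+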
+	global		_calc_ea
+_calc_ea:
+	mov.l		%d0,%a0			# move # bytes to a0
+
+# MODE and REG are taken from the EXC_OPWORD.
+	mov.w		EXC_OPWORD(%a6),%d0	# fetch opcode word
+	mov.w		%d0,%d1			# make a copy
+
+	andi.w		&0x3f,%d0		# extract mode field
+	andi.l		&0x7,%d1		# extract reg  field
+
+# jump to the corresponding function for each {MODE,REG} pair.
+	mov.w		(tbl_ea_mode.b,%pc,%d0.w*2), %d0 # fetch jmp distance
+	jmp		(tbl_ea_mode.b,%pc,%d0.w*1) # jmp to correct ea mode
+
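+# the 64 entries are 16-bit self-relative offsets ("label -
+# tbl_ea_mode"), keeping the table position independent; modes that
+# cannot reach _calc_ea (Dn/An direct and the unused 111 modes) hold
+# offset zero.
+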
+	swbeg		&64
+tbl_ea_mode:
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+	short		tbl_ea_mode	-	tbl_ea_mode
+
+	short		addr_ind_a0	-	tbl_ea_mode
+	short		addr_ind_a1	-	tbl_ea_mode
+	short		addr_ind_a2	-	tbl_ea_mode
+	short		addr_ind_a3	-	tbl_ea_mode
+	short		addr_ind_a4	-	tbl_ea_mode
+	short		addr_ind_a5	-	tbl_ea_mode
+	short		addr_ind_a6	-	tbl_ea_mode
+	short		addr_ind_a7	-	tbl_ea_mode
+
+	short		addr_ind_p_a0	-	tbl_ea_mode
+	short		addr_ind_p_a1	-	tbl_ea_mode
+	short		addr_ind_p_a2	-	tbl_ea_mode
+	short		addr_ind_p_a3	-	tbl_ea_mode
+	short		addr_ind_p_a4	-	tbl_ea_mode
+	short		addr_ind_p_a5	-	tbl_ea_mode
+	short		addr_ind_p_a6	-	tbl_ea_mode
+	short		addr_ind_p_a7	-	tbl_ea_mode
+
+	short		addr_ind_m_a0		-	tbl_ea_mode
+	short		addr_ind_m_a1		-	tbl_ea_mode
+	short		addr_ind_m_a2		-	tbl_ea_mode
+	short		addr_ind_m_a3		-	tbl_ea_mode
+	short		addr_ind_m_a4		-	tbl_ea_mode
+	short		addr_ind_m_a5		-	tbl_ea_mode
+	short		addr_ind_m_a6		-	tbl_ea_mode
+	short		addr_ind_m_a7		-	tbl_ea_mode
+
+	short		addr_ind_disp_a0	-	tbl_ea_mode
+	short		addr_ind_disp_a1	-	tbl_ea_mode
+	short		addr_ind_disp_a2	-	tbl_ea_mode
+	short		addr_ind_disp_a3	-	tbl_ea_mode
+	short		addr_ind_disp_a4	-	tbl_ea_mode
+	short		addr_ind_disp_a5	-	tbl_ea_mode
+	short		addr_ind_disp_a6	-	tbl_ea_mode
+	short		addr_ind_disp_a7	-	tbl_ea_mode
+
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+	short		_addr_ind_ext		-	tbl_ea_mode
+
+	short		abs_short		-	tbl_ea_mode
+	short		abs_long		-	tbl_ea_mode
+	short		pc_ind			-	tbl_ea_mode
+	short		pc_ind_ext		-	tbl_ea_mode
+	short		immediate		-	tbl_ea_mode
+	short		tbl_ea_mode		-	tbl_ea_mode
+	short		tbl_ea_mode		-	tbl_ea_mode
+	short		tbl_ea_mode		-	tbl_ea_mode
+
+###################################
+# Address register indirect: (An) #
+###################################
+addr_ind_a0:
+	mov.l		EXC_A0(%a6),%a0		# Get current a0
+	rts
+
+addr_ind_a1:
+	mov.l		EXC_A1(%a6),%a0		# Get current a1
+	rts
+
+addr_ind_a2:
+	mov.l		EXC_A2(%a6),%a0		# Get current a2
+	rts
+
+addr_ind_a3:
+	mov.l		EXC_A3(%a6),%a0		# Get current a3
+	rts
+
+addr_ind_a4:
+	mov.l		EXC_A4(%a6),%a0		# Get current a4
+	rts
+
+addr_ind_a5:
+	mov.l		EXC_A5(%a6),%a0		# Get current a5
+	rts
+
+addr_ind_a6:
+	mov.l		EXC_A6(%a6),%a0		# Get current a6
+	rts
+
+addr_ind_a7:
+	mov.l		EXC_A7(%a6),%a0		# Get current a7
+	rts
+
+#####################################################
+# Address register indirect w/ postincrement: (An)+ #
+#####################################################
+addr_ind_p_a0:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A0(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A0(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x0,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a1:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A1(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A1(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x1,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a2:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A2(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A2(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x2,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a3:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A3(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A3(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x3,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a4:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A4(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A4(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x4,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a5:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A5(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A5(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x5,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a6:
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A6(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A6(%a6)		# save incremented value
+
+	mov.l		%a0,EXC_SAVVAL(%a6)	# save in case of access error
+	mov.b		&0x6,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_p_a7:
+	mov.b		&mia7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+	mov.l		%a0,%d0			# copy no. bytes
+	mov.l		EXC_A7(%a6),%a0		# load current value
+	add.l		%a0,%d0			# increment
+	mov.l		%d0,EXC_A7(%a6)		# save incremented value
+	rts
+
+####################################################
+# Address register indirect w/ predecrement: -(An) #
+####################################################
+addr_ind_m_a0:
+	mov.l		EXC_A0(%a6),%d0		# Get current a0
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A0(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x0,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a1:
+	mov.l		EXC_A1(%a6),%d0		# Get current a1
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A1(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x1,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a2:
+	mov.l		EXC_A2(%a6),%d0		# Get current a2
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A2(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x2,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a3:
+	mov.l		EXC_A3(%a6),%d0		# Get current a3
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A3(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x3,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a4:
+	mov.l		EXC_A4(%a6),%d0		# Get current a4
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A4(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x4,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a5:
+	mov.l		EXC_A5(%a6),%d0		# Get current a5
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A5(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x5,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a6:
+	mov.l		EXC_A6(%a6),%d0		# Get current a6
+	mov.l		%d0,EXC_SAVVAL(%a6)	# save in case of access error
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A6(%a6)		# Save decr value
+	mov.l		%d0,%a0
+
+	mov.b		&0x6,EXC_SAVREG(%a6)	# save regno, too
+	mov.b		&restore_flg,SPCOND_FLG(%a6) # set flag
+	rts
+
+addr_ind_m_a7:
+	mov.b		&mda7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+	mov.l		EXC_A7(%a6),%d0		# Get current a7
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A7(%a6)		# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+########################################################
+# Address register indirect w/ displacement: (d16, An) #
+########################################################
+addr_ind_disp_a0:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A0(%a6),%a0		# a0 + d16
+	rts
+
+addr_ind_disp_a1:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A1(%a6),%a0		# a1 + d16
+	rts
+
+addr_ind_disp_a2:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A2(%a6),%a0		# a2 + d16
+	rts
+
+addr_ind_disp_a3:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A3(%a6),%a0		# a3 + d16
+	rts
+
+addr_ind_disp_a4:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A4(%a6),%a0		# a4 + d16
+	rts
+
+addr_ind_disp_a5:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A5(%a6),%a0		# a5 + d16
+	rts
+
+addr_ind_disp_a6:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A6(%a6),%a0		# a6 + d16
+	rts
+
+addr_ind_disp_a7:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+	add.l		EXC_A7(%a6),%a0		# a7 + d16
+	rts
+
+########################################################################
+# Address register indirect w/ index(8-bit displacement): (d8, An, Xn) #
+#    "       "         "    w/   "  (base displacement): (bd, An, Xn)  #
+# Memory indirect postindexed: ([bd, An], Xn, od)		       #
+# Memory indirect preindexed: ([bd, An, Xn], od)		       #
+########################################################################
+_addr_ind_ext:
+	mov.l		%d1,-(%sp)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch extword in d0
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.l		(%sp)+,%d1
+
+	mov.l		(EXC_AREGS,%a6,%d1.w*4),%a0 # put base in a0
+
+	btst		&0x8,%d0		# full format extension word?
+	beq.b		addr_ind_index_8bit	# no; brief format
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+
+	mov.l		%d0,%d5			# put extword in d5
+	mov.l		%a0,%d3			# put base in d3
+
+	bra.l		calc_mem_ind		# calc memory indirect
+
+addr_ind_index_8bit:
+	mov.l		%d2,-(%sp)		# save old d2
+
+	mov.l		%d0,%d1
+	rol.w		&0x4,%d1
+	andi.w		&0xf,%d1		# extract index regno
+
+	mov.l		(EXC_DREGS,%a6,%d1.w*4),%d1 # fetch index reg value
+
+	btst		&0xb,%d0		# is it word or long?
+	bne.b		aii8_long
+	ext.l		%d1			# sign extend word index
+aii8_long:
+	mov.l		%d0,%d2
+	rol.w		&0x7,%d2
+	andi.l		&0x3,%d2		# extract scale value
+
+	lsl.l		%d2,%d1			# shift index by scale
+
+	extb.l		%d0			# sign extend displacement
+	add.l		%d1,%d0			# index + disp
+	add.l		%d0,%a0			# An + (index + disp)
+
+	mov.l		(%sp)+,%d2		# restore old d2
+	rts
+
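+# In rough C terms, the brief-format decode above amounts to the
+# following (illustrative sketch only; regs[16] stands for the
+# d0-d7/a0-a7 image saved at EXC_DREGS, fetch16() for _imem_read_word):
+#
+#	uint16_t ext = fetch16();               /* brief extension word  */
+#	int32_t index = regs[ext >> 12];        /* D/A + regno pick Xn   */
+#	if (!(ext & 0x0800))                    /* W/L clear: word index */
+#		index = (int16_t)index;         /* sign extend to long   */
+#	index <<= (ext >> 9) & 0x3;             /* scale by 1,2,4 or 8   */
+#	ea = an + index + (int8_t)(ext & 0xff); /* add sign-extended d8  */
+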
+######################
+# Immediate: #<data> #
+#########################################################################
+# word, long: <ea> of the data is the current extension word		#
+#	pointer value. new extension word pointer is simply the old	#
+#	plus the number of bytes in the data type (2 or 4).		#
+#########################################################################
+immediate:
+	mov.b		&immed_flg,SPCOND_FLG(%a6) # set immediate flag
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch extension word ptr
+	rts
+
+###########################
+# Absolute short: (XXX).W #
+###########################
+abs_short:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch short address
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# return <ea> in a0
+	rts
+
+##########################
+# Absolute long: (XXX).L #
+##########################
+abs_long:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch long address
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.l		%d0,%a0			# return <ea> in a0
+	rts
+
+#######################################################
+# Program counter indirect w/ displacement: (d16, PC) #
+#######################################################
+pc_ind:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch word displacement
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_EXTWPTR(%a6),%a0	# pc + d16
+
+# _imem_read_word() increased the extwptr by 2. need to adjust here.
+	subq.l		&0x2,%a0		# adjust <ea>
+
+	rts
+
+##########################################################
+# PC indirect w/ index(8-bit displacement): (d8, PC, An) #
+# "     "     w/   "  (base displacement): (bd, PC, An)  #
+# PC memory indirect postindexed: ([bd, PC], Xn, od)     #
+# PC memory indirect preindexed: ([bd, PC, Xn], od)      #
+##########################################################
+pc_ind_ext:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch ext word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# put base in a0
+	subq.l		&0x2,%a0		# adjust base
+
+	btst		&0x8,%d0		# is disp only 8 bits?
+	beq.b		pc_ind_index_8bit	# yes
+
+# the indexed addressing mode uses a base displacement of size
+# word or long
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+
+	mov.l		%d0,%d5			# put extword in d5
+	mov.l		%a0,%d3			# put base in d3
+
+	bra.l		calc_mem_ind		# calc memory indirect
+
+pc_ind_index_8bit:
+	mov.l		%d2,-(%sp)		# create a temp register
+
+	mov.l		%d0,%d1			# make extword copy
+	rol.w		&0x4,%d1		# rotate reg num into place
+	andi.w		&0xf,%d1		# extract register number
+
+	mov.l		(EXC_DREGS,%a6,%d1.w*4),%d1 # fetch index reg value
+
+	btst		&0xb,%d0		# is index word or long?
+	bne.b		pii8_long		# long
+	ext.l		%d1			# sign extend word index
+pii8_long:
+	mov.l		%d0,%d2			# make extword copy
+	rol.w		&0x7,%d2		# rotate scale value into place
+	andi.l		&0x3,%d2		# extract scale value
+
+	lsl.l		%d2,%d1			# shift index by scale
+
+	extb.l		%d0			# sign extend displacement
+	add.l		%d1,%d0			# index + disp
+	add.l		%d0,%a0			# An + (index + disp)
+
+	mov.l		(%sp)+,%d2		# restore temp register
+
+	rts
+
+# a5 = exc_extwptr	(global to uaeh)
+# a4 = exc_opword	(global to uaeh)
+# a3 = exc_dregs	(global to uaeh)
+
+# d2 = index		(internal "     "    )
+# d3 = base		(internal "     "    )
+# d4 = od		(internal "     "    )
+# d5 = extword		(internal "     "    )
+calc_mem_ind:
+	btst		&0x6,%d5		# is the index suppressed?
+	beq.b		calc_index
+	clr.l		%d2			# yes, so index = 0
+	bra.b		base_supp_ck
+calc_index:
+	bfextu		%d5{&16:&4},%d2
+	mov.l		(EXC_DREGS,%a6,%d2.w*4),%d2
+	btst		&0xb,%d5		# is index word or long?
+	bne.b		no_ext
+	ext.l		%d2
+no_ext:
+	bfextu		%d5{&21:&2},%d0
+	lsl.l		%d0,%d2
+base_supp_ck:
+	btst		&0x7,%d5		# is the bd suppressed?
+	beq.b		no_base_sup
+	clr.l		%d3
+no_base_sup:
+	bfextu		%d5{&26:&2},%d0	# get bd size
+#	beq.l		_error			# if (size == 0) it's reserved
+	cmpi.b		%d0,&2
+	blt.b		no_bd
+	beq.b		get_word_bd
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	bra.b		chk_ind
+get_word_bd:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	ext.l		%d0			# sign extend bd
+
+chk_ind:
+	add.l		%d0,%d3			# base += bd
+no_bd:
+	bfextu		%d5{&30:&2},%d0		# is od suppressed?
+	beq.w		aii_bd
+	cmpi.b		%d0,&0x2
+	blt.b		null_od
+	beq.b		word_od
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	bra.b		add_them
+
+word_od:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	ext.l		%d0			# sign extend od
+	bra.b		add_them
+
+null_od:
+	clr.l		%d0
+add_them:
+	mov.l		%d0,%d4
+	btst		&0x2,%d5		# pre or post indexing?
+	beq.b		pre_indexed
+
+	mov.l		%d3,%a0
+	bsr.l		_dmem_read_long
+
+	tst.l		%d1			# dfetch error?
+	bne.b		calc_ea_err		# yes
+
+	add.l		%d2,%d0			# <ea> += index
+	add.l		%d4,%d0			# <ea> += od
+	bra.b		done_ea
+
+pre_indexed:
+	add.l		%d2,%d3			# preindexing
+	mov.l		%d3,%a0
+	bsr.l		_dmem_read_long
+
+	tst.l		%d1			# ifetch error?
+	bne.b		calc_ea_err		# yes
+
+	add.l		%d4,%d0			# ea += od
+	bra.b		done_ea
+
+aii_bd:
+	add.l		%d2,%d3			# ea = (base + bd) + index
+	mov.l		%d3,%d0
+done_ea:
+	mov.l		%d0,%a0
+
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	rts
+
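+# In outline, the full-format calculation above is (rough C sketch;
+# read32() stands for _dmem_read_long() and suppressed fields read
+# as zero):
+#
+#	base = an + bd;                 /* an (or pc); bd null/word/long */
+#	if (!mem_indirect)    ea = base + index;
+#	else if (postindexed) ea = read32(base) + index + od;
+#	else                  ea = read32(base + index) + od;
+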
+# if dmem_read_long() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# FSLW:
+#	read = true
+#	size = longword
+#	TM = data
+#	software emulation error = true
+calc_ea_err:
+	mov.l		%d3,%a0			# pass failing address
+	mov.l		&0x01010001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+#########################################################################
+# XDEF **************************************************************** #
+#	_moveperipheral(): routine to emulate movep instruction		#
+#									#
+# XREF **************************************************************** #
+#	_dmem_read_byte() - read byte from memory			#
+#	_dmem_write_byte() - write byte to memory			#
+#	isp_dacc() - handle data access error exception			#
+#									#
+# INPUT *************************************************************** #
+#	none								#
+#									#
+# OUTPUT ************************************************************** #
+#	If exiting through isp_dacc...					#
+#		a0 = failing address					#
+#		d0 = FSLW						#
+#	else								#
+#		none							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Decode the movep instruction words stored at EXC_OPWORD and	#
+# either read or write the required bytes from/to memory. Use the	#
+# _dmem_{read,write}_byte() routines. If one of the memory routines	#
+# returns a failing value, we must pass the failing address and	a FSLW	#
+# to the _isp_dacc() routine.						#
+#	Since this instruction is used to access peripherals, make sure	#
+# to only access the required bytes.					#
+#									#
+#########################################################################
+
+###########################
+# movep.(w,l)	Dx,(d,Ay) #
+# movep.(w,l)	(d,Ay),Dx #
+###########################
+	global		_moveperipheral
+_moveperipheral:
+	mov.w		EXC_OPWORD(%a6),%d1	# fetch the opcode word
+
+	mov.b		%d1,%d0
+	and.w		&0x7,%d0		# extract Ay from opcode word
+
+	mov.l		(EXC_AREGS,%a6,%d0.w*4),%a0 # fetch ay
+
+	add.w		EXC_EXTWORD(%a6),%a0	# add: an + sgn_ext(disp)
+
+	btst		&0x7,%d1		# (reg 2 mem) or (mem 2 reg)
+	beq.w		mem2reg
+
+# reg2mem: fetch dx, then write it to memory
+reg2mem:
+	mov.w		%d1,%d0
+	rol.w		&0x7,%d0
+	and.w		&0x7,%d0		# extract Dx from opcode word
+
+	mov.l		(EXC_DREGS,%a6,%d0.w*4), %d0 # fetch dx
+
+	btst		&0x6,%d1		# word or long operation?
+	beq.b		r2mwtrans
+
+# a0 = dst addr
+# d0 = Dx
+r2mltrans:
+	mov.l		%d0,%d2			# store data
+	mov.l		%a0,%a2			# store addr
+	rol.l		&0x8,%d2
+	mov.l		%d2,%d0
+
+	bsr.l		_dmem_write_byte	# os  : write hi
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_write_err		# yes
+
+	add.w		&0x2,%a2		# incr addr
+	mov.l		%a2,%a0
+	rol.l		&0x8,%d2
+	mov.l		%d2,%d0
+
+	bsr.l		_dmem_write_byte	# os  : write byte 2
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_write_err		# yes
+
+	add.w		&0x2,%a2		# incr addr
+	mov.l		%a2,%a0
+	rol.l		&0x8,%d2
+	mov.l		%d2,%d0
+
+	bsr.l		_dmem_write_byte	# os  : write byte 3
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_write_err		# yes
+
+	add.w		&0x2,%a2		# incr addr
+	mov.l		%a2,%a0
+	rol.l		&0x8,%d2
+	mov.l		%d2,%d0
+
+	bsr.l		_dmem_write_byte	# os  : write lo
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_write_err		# yes
+
+	rts
+
+# a0 = dst addr
+# d0 = Dx
+r2mwtrans:
+	mov.l		%d0,%d2			# store data
+	mov.l		%a0,%a2			# store addr
+	lsr.w		&0x8,%d0
+
+	bsr.l		_dmem_write_byte	# os  : write hi
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_write_err		# yes
+
+	add.w		&0x2,%a2
+	mov.l		%a2,%a0
+	mov.l		%d2,%d0
+
+	bsr.l		_dmem_write_byte	# os  : write lo
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_write_err		# yes
+
+	rts
+
+# mem2reg: read the bytes from memory, determine the destination
+# register, and write the bytes into it.
+mem2reg:
+	btst		&0x6,%d1		# word or long operation?
+	beq.b		m2rwtrans
+
+# a0 = dst addr
+m2rltrans:
+	mov.l		%a0,%a2			# store addr
+
+	bsr.l		_dmem_read_byte		# read first byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_read_err		# yes
+
+	mov.l		%d0,%d2
+
+	add.w		&0x2,%a2		# incr addr by 2 bytes
+	mov.l		%a2,%a0
+
+	bsr.l		_dmem_read_byte		# read second byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_read_err		# yes
+
+	lsl.w		&0x8,%d2
+	mov.b		%d0,%d2			# append bytes
+
+	add.w		&0x2,%a2		# incr addr by 2 bytes
+	mov.l		%a2,%a0
+
+	bsr.l		_dmem_read_byte		# read third byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_read_err		# yes
+
+	lsl.l		&0x8,%d2
+	mov.b		%d0,%d2			# append bytes
+
+	add.w		&0x2,%a2		# incr addr by 2 bytes
+	mov.l		%a2,%a0
+
+	bsr.l		_dmem_read_byte		# read fourth byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_read_err		# yes
+
+	lsl.l		&0x8,%d2
+	mov.b		%d0,%d2			# append bytes
+
+	mov.b		EXC_OPWORD(%a6),%d1
+	lsr.b		&0x1,%d1
+	and.w		&0x7,%d1		# extract Dx from opcode word
+
+	mov.l		%d2,(EXC_DREGS,%a6,%d1.w*4) # store dx
+
+	rts
+
+# a0 = dst addr
+m2rwtrans:
+	mov.l		%a0,%a2			# store addr
+
+	bsr.l		_dmem_read_byte		# read first byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_read_err		# yes
+
+	mov.l		%d0,%d2
+
+	add.w		&0x2,%a2		# incr addr by 2 bytes
+	mov.l		%a2,%a0
+
+	bsr.l		_dmem_read_byte		# read second byte
+
+	tst.l		%d1			# dfetch error?
+	bne.w		movp_read_err		# yes
+
+	lsl.w		&0x8,%d2
+	mov.b		%d0,%d2			# append bytes
+
+	mov.b		EXC_OPWORD(%a6),%d1
+	lsr.b		&0x1,%d1
+	and.w		&0x7,%d1		# extract Dx from opcode word
+
+	mov.w		%d2,(EXC_DREGS+2,%a6,%d1.w*4) # store dx
+
+	rts
+
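+# In rough C terms, the transfers above are (illustrative sketch only;
+# write8() stands for _dmem_write_byte()):
+#
+#	/* movep.l %dx,(d16,%ay): the four bytes of dx go, msb first, */
+#	/* to every other byte address so that a byte-wide peripheral */
+#	/* on one half of the data bus sees a contiguous register:    */
+#	for (int i = 0; i < 4; i++)
+#		write8(ay + d16 + 2*i, (dx >> (24 - 8*i)) & 0xff);
+#	/* movep.l (d16,%ay),%dx reads the same four addresses and    */
+#	/* reassembles them, msb first, into dx.                      */
+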
+# if dmem_{read,write}_byte() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# FSLW:
+#	write = true
+#	size = byte
+#	TM = data
+#	software emulation error = true
+movp_write_err:
+	mov.l		%a2,%a0			# pass failing address
+	mov.l		&0x00a10001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+# FSLW:
+#	read = true
+#	size = byte
+#	TM = data
+#	software emulation error = true
+movp_read_err:
+	mov.l		%a2,%a0			# pass failing address
+	mov.l		&0x01210001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_chk2_cmp2(): routine to emulate chk2/cmp2 instructions		#
+#									#
+# XREF ****************************************************************	#
+#	_calc_ea(): calculate effective address				#
+#	_dmem_read_long(): read operands				#
+#	_dmem_read_word(): read operands				#
+#	isp_dacc(): handle data access error exception			#
+#									#
+# INPUT ***************************************************************	#
+#	none								#
+#									#
+# OUTPUT **************************************************************	#
+#	If exiting through isp_dacc...					#
+#		a0 = failing address					#
+#		d0 = FSLW						#
+#	else								#
+#		none							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	First, calculate the effective address, then fetch the byte,	#
+# word, or longword sized operands. Then, in the interest of		#
+# simplicity, all operands are converted to longword size whether the	#
+# operation is byte, word, or long. The bounds are sign extended	#
+# accordingly. If Rn is a data register, Rn is also sign extended. If	#
+# Rn is an address register, it need not be sign extended since the	#
+# full register is always used.						#
+#	The comparisons are made and the condition codes calculated.	#
+# If the instruction is chk2 and the Rn value is out-of-bounds, set	#
+# the ichk_flg in SPCOND_FLG.						#
+#	If the memory fetch returns a failing value, pass the failing	#
+# address and FSLW to the isp_dacc() routine.				#
+#									#
+#########################################################################
+
+	global		_chk2_cmp2
+_chk2_cmp2:
+
+# passing the size parameter doesn't matter since chk2 & cmp2 can't use
+# predecrement, postincrement, or immediate addressing.
+	bsr.l		_calc_ea		# calculate <ea>
+
+	mov.b		EXC_EXTWORD(%a6), %d0	# fetch hi extension word
+	rol.b		&0x4, %d0		# rotate reg bits into lo
+	and.w		&0xf, %d0		# extract reg bits
+
+	mov.l		(EXC_DREGS,%a6,%d0.w*4), %d2 # get regval
+
+	cmpi.b		EXC_OPWORD(%a6), &0x2	# what size is operation?
+	blt.b		chk2_cmp2_byte		# size == byte
+	beq.b		chk2_cmp2_word		# size == word
+
+# the bounds are longword size. call routine to read the lower
+# bound into d0 and the higher bound into d1.
+chk2_cmp2_long:
+	mov.l		%a0,%a2			# save copy of <ea>
+	bsr.l		_dmem_read_long		# fetch long lower bound
+
+	tst.l		%d1			# dfetch error?
+	bne.w		chk2_cmp2_err_l		# yes
+
+	mov.l		%d0,%d3			# save long lower bound
+	addq.l		&0x4,%a2
+	mov.l		%a2,%a0			# pass <ea> of long upper bound
+	bsr.l		_dmem_read_long		# fetch long upper bound
+
+	tst.l		%d1			# dfetch error?
+	bne.w		chk2_cmp2_err_l		# yes
+
+	mov.l		%d0,%d1			# long upper bound in d1
+	mov.l		%d3,%d0			# long lower bound in d0
+	bra.w		chk2_cmp2_compare	# go do the compare emulation
+
+# the bounds are word size. fetch them in one subroutine call by
+# reading a longword. sign extend both. if it's a data operation,
+# sign extend Rn to long, also.
+chk2_cmp2_word:
+	mov.l		%a0,%a2
+	bsr.l		_dmem_read_long		# fetch 2 word bounds
+
+	tst.l		%d1			# dfetch error?
+	bne.w		chk2_cmp2_err_l		# yes
+
+	mov.w		%d0, %d1		# place hi in %d1
+	swap		%d0			# place lo in %d0
+
+	ext.l		%d0			# sign extend lo bnd
+	ext.l		%d1			# sign extend hi bnd
+
+	btst		&0x7, EXC_EXTWORD(%a6)	# address compare?
+	bne.w		chk2_cmp2_compare	# yes; don't sign extend
+
+# operation is a data register compare.
+# sign extend word to long so we can do simple longword compares.
+	ext.l		%d2			# sign extend data word
+	bra.w		chk2_cmp2_compare	# go emulate compare
+
+# the bounds are byte size. fetch them in one subroutine call by
+# reading a word. sign extend both. if it's a data operation,
+# sign extend Rn to long, also.
+chk2_cmp2_byte:
+	mov.l		%a0,%a2
+	bsr.l		_dmem_read_word		# fetch 2 byte bounds
+
+	tst.l		%d1			# dfetch error?
+	bne.w		chk2_cmp2_err_w		# yes
+
+	mov.b		%d0, %d1		# place hi in %d1
+	lsr.w		&0x8, %d0		# place lo in %d0
+
+	extb.l		%d0			# sign extend lo bnd
+	extb.l		%d1			# sign extend hi bnd
+
+	btst		&0x7, EXC_EXTWORD(%a6)	# address compare?
+	bne.b		chk2_cmp2_compare	# yes; don't sign extend
+
+# operation is a data register compare.
+# sign extend byte to long so we can do simple longword compares.
+	extb.l		%d2			# sign extend data byte
+
+#
+# To set the ccodes correctly:
+#	(1) save 'Z' bit from (Rn - lo)
+#	(2) save 'Z' and 'C' bits from ((hi - lo) - (Rn - lo))
+#	(3) keep 'X', 'N', and 'V' from before instruction
+#	(4) combine ccodes
+#
+chk2_cmp2_compare:
+	sub.l		%d0, %d2		# (Rn - lo)
+	mov.w		%cc, %d3		# fetch resulting ccodes
+	andi.b		&0x4, %d3		# keep 'Z' bit
+	sub.l		%d0, %d1		# (hi - lo)
+	cmp.l		%d1,%d2			# ((hi - lo) - (Rn - lo))
+
+	mov.w		%cc, %d4		# fetch resulting ccodes
+	or.b		%d4, %d3		# combine w/ earlier ccodes
+	andi.b		&0x5, %d3		# keep 'Z' and 'C'
+
+	mov.w		EXC_CC(%a6), %d4	# fetch old ccodes
+	andi.b		&0x1a, %d4		# keep 'X','N','V' bits
+	or.b		%d3, %d4		# insert new ccodes
+	mov.w		%d4, EXC_CC(%a6)	# save new ccodes
+
+	btst		&0x3, EXC_EXTWORD(%a6)	# separate chk2,cmp2
+	bne.b		chk2_finish		# it's a chk2
+
+	rts
+
+# this code handles the only difference between chk2 and cmp2. chk2 would
+# have trapped out if the value was out of bounds. we check this by seeing
+# if the 'C' bit was set by the operation.
+chk2_finish:
+	btst		&0x0, %d4		# is 'C' bit set?
+	bne.b		chk2_trap		# yes; chk2 should trap
+	rts
+chk2_trap:
+	mov.b		&ichk_flg,SPCOND_FLG(%a6) # set "special case" flag
+	rts
+
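+# The sequence above is the classic unsigned range check, roughly (C
+# sketch; valid for signed and unsigned bounds alike because everything
+# was sign extended to longword first):
+#
+#	uint32_t a = rn - lo;      /* 'Z' here means rn == lo           */
+#	uint32_t b = hi - lo;
+#	/* the compare computes b - a: 'Z' means rn == hi, while 'C'   */
+#	/* (a borrow, i.e. a > b) means rn lies outside [lo,hi], which */
+#	/* is the condition chk2 traps on.                             */
+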
+# if dmem_read_{long,word}() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# FSLW:
+#	read = true
+#	size = longword
+#	TM = data
+#	software emulation error = true
+chk2_cmp2_err_l:
+	mov.l		%a2,%a0			# pass failing address
+	mov.l		&0x01010001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+# FSLW:
+#	read = true
+#	size = word
+#	TM = data
+#	software emulation error = true
+chk2_cmp2_err_w:
+	mov.l		%a2,%a0			# pass failing address
+	mov.l		&0x01410001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_div64(): routine to emulate div{u,s}.l <ea>,Dr:Dq		#
+#							64/32->32r:32q	#
+#									#
+# XREF ****************************************************************	#
+#	_calc_ea() - calculate effective address			#
+#	isp_iacc() - handle instruction access error exception		#
+#	isp_dacc() - handle data access error exception			#
+#	isp_restore() - restore An on access error w/ -() or ()+	#
+#									#
+# INPUT ***************************************************************	#
+#	none								#
+#									#
+# OUTPUT **************************************************************	#
+#	If exiting through isp_dacc...					#
+#		a0 = failing address					#
+#		d0 = FSLW						#
+#	else								#
+#		none							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	First, decode the operand location. If it's in Dn, fetch from	#
+# the stack. If it's in memory, use _calc_ea() to calculate the		#
+# effective address. Use _dmem_read_long() to fetch at that address.	#
+# Unless the operand is immediate data. Then use _imem_read_long().	#
+# Send failures to isp_dacc() or isp_iacc() as appropriate.		#
+#	If the operands are signed, make them unsigned and save	the	#
+# sign info for later. Separate out special cases like divide-by-zero	#
+# or 32-bit divides if possible. Else, use a special math algorithm	#
+# to calculate the result.						#
+#	Restore sign info if signed instruction. Set the condition	#
+# codes. Set idbyz_flg in SPCOND_FLG if divisor was zero. Store the	#
+# quotient and remainder in the appropriate data registers on the stack.#
+#									#
+#########################################################################
+
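+# In rough C terms, the unsigned case emulated below is (illustrative
+# sketch only):
+#
+#	uint64_t n = ((uint64_t)dr << 32) | dq;     /* Dr:Dq dividend */
+#	if (divisor == 0) {
+#		/* divide by zero: set idbyz_flg and take the trap  */
+#	} else if (n / divisor > 0xffffffffULL) {
+#		/* overflow: set 'V' and leave Dr:Dq unchanged      */
+#	} else {
+#		dq = (uint32_t)(n / divisor);       /* quotient     */
+#		dr = (uint32_t)(n % divisor);       /* remainder    */
+#	}
+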
+set	NDIVISOR,	EXC_TEMP+0x0
+set	NDIVIDEND,	EXC_TEMP+0x1
+set	NDRSAVE,	EXC_TEMP+0x2
+set	NDQSAVE,	EXC_TEMP+0x4
+set	DDSECOND,	EXC_TEMP+0x6
+set	DDQUOTIENT,	EXC_TEMP+0x8
+set	DDNORMAL,	EXC_TEMP+0xc
+
+	global		_div64
+#############
+# div(u,s)l #
+#############
+_div64:
+	mov.b		EXC_OPWORD+1(%a6), %d0
+	andi.b		&0x38, %d0		# extract src mode
+
+	bne.w		dcontrolmodel_s		# src in memory; go fetch it
+
+	mov.b		EXC_OPWORD+1(%a6), %d0	# extract Dn from opcode
+	andi.w		&0x7, %d0
+	mov.l		(EXC_DREGS,%a6,%d0.w*4), %d7 # fetch divisor from register
+
+dgotsrcl:
+	beq.w		div64eq0		# divisor is = 0!!!
+
+	mov.b		EXC_EXTWORD+1(%a6), %d0	# extract Dr from extword
+	mov.b		EXC_EXTWORD(%a6), %d1	# extract Dq from extword
+	and.w		&0x7, %d0
+	lsr.b		&0x4, %d1
+	and.w		&0x7, %d1
+	mov.w		%d0, NDRSAVE(%a6)	# save Dr for later
+	mov.w		%d1, NDQSAVE(%a6)	# save Dq for later
+
+# fetch %dr and %dq directly off stack since all regs are saved there
+	mov.l		(EXC_DREGS,%a6,%d0.w*4), %d5 # get dividend hi
+	mov.l		(EXC_DREGS,%a6,%d1.w*4), %d6 # get dividend lo
+
+# separate signed and unsigned divide
+	btst		&0x3, EXC_EXTWORD(%a6)	# signed or unsigned?
+	beq.b		dspecialcases		# use positive divide
+
+# save the sign of the divisor
+# make divisor unsigned if it's negative
+	tst.l		%d7			# chk sign of divisor
+	slt		NDIVISOR(%a6)		# save sign of divisor
+	bpl.b		dsgndividend
+	neg.l		%d7			# complement negative divisor
+
+# save the sign of the dividend
+# make dividend unsigned if it's negative
+dsgndividend:
+	tst.l		%d5			# chk sign of hi(dividend)
+	slt		NDIVIDEND(%a6)		# save sign of dividend
+	bpl.b		dspecialcases
+
+	mov.w		&0x0, %cc		# clear 'X' cc bit
+	negx.l		%d6			# complement signed dividend
+	negx.l		%d5
+
+# extract some special cases:
+#	- is (dividend == 0) ?
+#	- is (hi(dividend) == 0 && (divisor <= lo(dividend))) ? (32-bit div)
+dspecialcases:
+	tst.l		%d5			# is (hi(dividend) == 0)
+	bne.b		dnormaldivide		# no, so try it the long way
+
+	tst.l		%d6			# is (lo(dividend) == 0), too
+	beq.w		ddone			# yes, so (dividend == 0)
+
+	cmp.l		%d7,%d6			# is (divisor <= lo(dividend))
+	bls.b		d32bitdivide		# yes, so use 32 bit divide
+
+	exg		%d5,%d6			# q = 0, r = dividend
+	bra.w		divfinish		# can't divide, we're done.
+
+d32bitdivide:
+	tdivu.l		%d7, %d5:%d6		# it's only a 32/32 bit div!
+
+	bra.b		divfinish
+
+dnormaldivide:
+# last special case:
+#	- is hi(dividend) >= divisor ? if yes, then overflow
+	cmp.l		%d7,%d5
+	bls.b		ddovf			# answer won't fit in 32 bits
+
+# perform the divide algorithm:
+	bsr.l		dclassical		# do int divide
+
+# separate into signed and unsigned finishes.
+divfinish:
+	btst		&0x3, EXC_EXTWORD(%a6)	# do divs, divu separately
+	beq.b		ddone			# divu has no processing!!!
+
+# it was a divs.l, so ccode setting is a little more complicated...
+	tst.b		NDIVIDEND(%a6)		# remainder has same sign
+	beq.b		dcc			# as dividend.
+	neg.l		%d5			# sgn(rem) = sgn(dividend)
+dcc:
+	mov.b		NDIVISOR(%a6), %d0
+	eor.b		%d0, NDIVIDEND(%a6)	# chk if quotient is negative
+	beq.b		dqpos			# branch to quot positive
+
+# 0x80000000 is the largest number representable as a 32-bit negative
+# number. the negative of 0x80000000 is 0x80000000.
+	cmpi.l		%d6, &0x80000000	# will (-quot) fit in 32 bits?
+	bhi.b		ddovf
+
+	neg.l		%d6			# make (-quot) 2's comp
+
+	bra.b		ddone
+
+dqpos:
+	btst		&0x1f, %d6		# will (+quot) fit in 32 bits?
+	bne.b		ddovf
+
+ddone:
+# at this point, result is normal so ccodes are set based on result.
+	mov.w		EXC_CC(%a6), %cc
+	tst.l		%d6			# set %ccode bits
+	mov.w		%cc, EXC_CC(%a6)
+
+	mov.w		NDRSAVE(%a6), %d0	# get Dr off stack
+	mov.w		NDQSAVE(%a6), %d1	# get Dq off stack
+
+# if the register numbers are the same, only the quotient gets saved.
+# so, if we always save the quotient second, we save ourselves a cmp&beq
+	mov.l		%d5, (EXC_DREGS,%a6,%d0.w*4) # save remainder
+	mov.l		%d6, (EXC_DREGS,%a6,%d1.w*4) # save quotient
+
+	rts
+
+ddovf:
+	bset		&0x1, EXC_CC+1(%a6)	# 'V' set on overflow
+	bclr		&0x0, EXC_CC+1(%a6)	# 'C' cleared on overflow
+
+	rts
+
+div64eq0:
+	andi.b		&0x1e, EXC_CC+1(%a6)	# clear 'C' bit on divbyzero
+	ori.b		&idbyz_flg,SPCOND_FLG(%a6) # set "special case" flag
+	rts
+
+###########################################################################
+#########################################################################
+# This routine uses the 'classical' Algorithm D from Donald Knuth's	#
+# Art of Computer Programming, vol II, Seminumerical Algorithms.	#
+# For this implementation b=2**16, and the target is U1U2U3U4/V1V2,	#
+# where U,V are words of the quadword dividend and longword divisor,	#
+# and U1, V1 are the most significant words.				#
+#									#
+# The most sig. longword of the 64 bit dividend must be in %d5, least	#
+# in %d6. The (unsigned) divisor must be in %d7; the caller handles	#
+# the signs and makes both operands positive beforehand.		#
+# The quotient is returned in %d6, remainder in %d5, unless the		#
+# v (overflow) bit is set in the saved %ccr. If overflow, the dividend	#
+# is unchanged.								#
+#########################################################################
+dclassical:
+# if the divisor msw is 0, use a simpler algorithm than the full-blown
+# one at ddknuth:
+
+	cmpi.l		%d7, &0xffff
+	bhi.b		ddknuth			# go use D. Knuth algorithm
+
+# Since the divisor is only a word (and larger than the mslw of the dividend),
+# a simpler algorithm may be used :
+# In the general case, four quotient words would be created by
+# dividing the divisor word into each dividend word. In this case,
+# the first two quotient words must be zero, or overflow would occur.
+# Since we already checked this case above, we can treat the most significant
+# longword of the dividend as (0) remainder (see Knuth) and merely complete
+# the last two divisions to get a quotient longword and word remainder:
+
+	clr.l		%d1
+	swap		%d5			# same as r*b if previous step rqd
+	swap		%d6			# get u3 to lsw position
+	mov.w		%d6, %d5		# rb + u3
+
+	divu.w		%d7, %d5
+
+	mov.w		%d5, %d1		# first quotient word
+	swap		%d6			# get u4
+	mov.w		%d6, %d5		# rb + u4
+
+	divu.w		%d7, %d5
+
+	swap		%d1
+	mov.w		%d5, %d1		# 2nd quotient 'digit'
+	clr.w		%d5
+	swap		%d5			# now remainder
+	mov.l		%d1, %d6		# and quotient
+
+	rts
+
+ddknuth:
+# In this algorithm, the divisor is treated as a 2 digit (word) number
+# which is divided into a 3 digit (word) dividend to get one quotient
+# digit (word). After subtraction, the dividend is shifted and the
+# process repeated. Before beginning, the divisor and quotient are
+# 'normalized' so that the process of estimating the quotient digit
+# will yield verifiably correct results.
+
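+# One digit step with b = 2^16 amounts to (rough C sketch, using 64-bit
+# arithmetic for clarity):
+#
+#	uint64_t qhat = (u1 == v1) ? 0xffff : (u1*b + u2) / v1;
+#	while (v2*qhat > (u1*b + u2 - v1*qhat)*b + u3)
+#		qhat--;                  /* Knuth: loops at most twice */
+#	/* then qhat*(v1:v2) is subtracted from u1:u2:u3; a borrow     */
+#	/* means qhat was still 1 too big, so decrement and add back.  */
+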
+	clr.l		DDNORMAL(%a6)		# count of shifts for normalization
+	clr.b		DDSECOND(%a6)		# clear flag for quotient digits
+	clr.l		%d1			# %d1 will hold trial quotient
+ddnchk:
+	btst		&31, %d7		# must we normalize? first word of
+	bne.b		ddnormalized		# divisor (V1) must be >= 65536/2
+	addq.l		&0x1, DDNORMAL(%a6)	# count normalization shifts
+	lsl.l		&0x1, %d7		# shift the divisor
+	lsl.l		&0x1, %d6		# shift u4,u3 with overflow to u2
+	roxl.l		&0x1, %d5		# shift u1,u2
+	bra.w		ddnchk
+ddnormalized:
+
+# Now calculate an estimate of the quotient words (msw first, then lsw).
+# The comments use subscripts for the first quotient digit determination.
+	mov.l		%d7, %d3		# divisor
+	mov.l		%d5, %d2		# dividend mslw
+	swap		%d2
+	swap		%d3
+	cmp.w		%d2, %d3		# V1 = U1 ?
+	bne.b		ddqcalc1
+	mov.w		&0xffff, %d1		# use max trial quotient word
+	bra.b		ddadj0
+ddqcalc1:
+	mov.l		%d5, %d1
+
+	divu.w		%d3, %d1		# use quotient of mslw/msw
+
+	andi.l		&0x0000ffff, %d1	# zero any remainder
+ddadj0:
+
+# now test the trial quotient and adjust. This step plus the
+# normalization assures (according to Knuth) that the trial
+# quotient will be at worst 1 too large.
+	mov.l		%d6, -(%sp)
+	clr.w		%d6			# word u3 left
+	swap		%d6			# in lsw position
+ddadj1: mov.l		%d7, %d3
+	mov.l		%d1, %d2
+	mulu.w		%d7, %d2		# V2q
+	swap		%d3
+	mulu.w		%d1, %d3		# V1q
+	mov.l		%d5, %d4		# U1U2
+	sub.l		%d3, %d4		# U1U2 - V1q
+
+	swap		%d4
+
+	mov.w		%d4,%d0
+	mov.w		%d6,%d4			# insert lower word (U3)
+
+	tst.w		%d0			# is upper word set?
+	bne.w		ddadjd1
+
+#	add.l		%d6, %d4		# (U1U2 - V1q) + U3
+
+	cmp.l		%d2, %d4
+	bls.b		ddadjd1			# is V2q > (U1U2-V1q) + U3 ?
+	subq.l		&0x1, %d1		# yes, decrement and recheck
+	bra.b		ddadj1
+ddadjd1:
+# now test the word by multiplying it by the divisor (V1V2) and comparing
+# the 3 digit (word) result with the current dividend words
+	mov.l		%d5, -(%sp)		# save %d5 (%d6 already saved)
+	mov.l		%d1, %d6
+	swap		%d6			# shift answer to ms 3 words
+	mov.l		%d7, %d5
+	bsr.l		dmm2
+	mov.l		%d5, %d2		# now %d2,%d3 are trial*divisor
+	mov.l		%d6, %d3
+	mov.l		(%sp)+, %d5		# restore dividend
+	mov.l		(%sp)+, %d6
+	sub.l		%d3, %d6
+	subx.l		%d2, %d5		# subtract double precision
+	bcc		dd2nd			# no carry, do next quotient digit
+	subq.l		&0x1, %d1		# q is one too large
+# need to add back divisor longword to current ms 3 digits of dividend
+# - according to Knuth, this is done only 2 out of 65536 times for random
+# divisor, dividend selection.
+	clr.l		%d2
+	mov.l		%d7, %d3
+	swap		%d3
+	clr.w		%d3			# %d3 now ls word of divisor
+	add.l		%d3, %d6		# aligned with 3rd word of dividend
+	addx.l		%d2, %d5
+	mov.l		%d7, %d3
+	clr.w		%d3			# %d3 now ms word of divisor
+	swap		%d3			# aligned with 2nd word of dividend
+	add.l		%d3, %d5
+dd2nd:
+	tst.b		DDSECOND(%a6)		# both q words done?
+	bne.b		ddremain
+# first quotient digit now correct. store digit and shift the
+# (subtracted) dividend
+	mov.w		%d1, DDQUOTIENT(%a6)
+	clr.l		%d1
+	swap		%d5
+	swap		%d6
+	mov.w		%d6, %d5
+	clr.w		%d6
+	st		DDSECOND(%a6)		# second digit
+	bra.w		ddnormalized
+ddremain:
+# add 2nd word to quotient, get the remainder.
+	mov.w		%d1, DDQUOTIENT+2(%a6)
+# shift down one word/digit to renormalize remainder.
+	mov.w		%d5, %d6
+	swap		%d6
+	swap		%d5
+	mov.l		DDNORMAL(%a6), %d7	# get norm shift count
+	beq.b		ddrn
+	subq.l		&0x1, %d7		# set for loop count
+ddnlp:
+	lsr.l		&0x1, %d5		# shift into %d6
+	roxr.l		&0x1, %d6
+	dbf		%d7, ddnlp
+ddrn:
+	mov.l		%d6, %d5		# remainder
+	mov.l		DDQUOTIENT(%a6), %d6	# quotient
+
+	rts
+dmm2:
+# factors for the 32X32->64 multiplication are in %d5 and %d6.
+# returns 64 bit result in %d5 (hi) %d6(lo).
+# destroys %d2,%d3,%d4.
+
+# multiply hi,lo words of each factor to get 4 intermediate products
+	mov.l		%d6, %d2
+	mov.l		%d6, %d3
+	mov.l		%d5, %d4
+	swap		%d3
+	swap		%d4
+	mulu.w		%d5, %d6		# %d6 <- lsw*lsw
+	mulu.w		%d3, %d5		# %d5 <- msw-dest*lsw-source
+	mulu.w		%d4, %d2		# %d2 <- msw-source*lsw-dest
+	mulu.w		%d4, %d3		# %d3 <- msw*msw
+# now use swap and addx to consolidate to two longwords
+	clr.l		%d4
+	swap		%d6
+	add.w		%d5, %d6		# add msw of l*l to lsw of m*l product
+	addx.w		%d4, %d3		# add any carry to m*m product
+	add.w		%d2, %d6		# add in lsw of other m*l product
+	addx.w		%d4, %d3		# add any carry to m*m product
+	swap		%d6			# %d6 is low 32 bits of final product
+	clr.w		%d5
+	clr.w		%d2			# lsw of two mixed products used,
+	swap		%d5			# now use msws of longwords
+	swap		%d2
+	add.l		%d2, %d5
+	add.l		%d3, %d5		# %d5 now ms 32 bits of final product
+	rts
+
+##########
+dcontrolmodel_s:
+	movq.l		&LONG,%d0		# pass # of bytes
+	bsr.l		_calc_ea		# calc <ea>
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg # immediate addressing mode?
+	beq.b		dimmed			# yes
+
+	mov.l		%a0,%a2
+	bsr.l		_dmem_read_long		# fetch divisor from <ea>
+
+	tst.l		%d1			# dfetch error?
+	bne.b		div64_err		# yes
+
+	mov.l		%d0, %d7
+	bra.w		dgotsrcl
+
+# we have to split out immediate data here because it must be read using
+# imem_read() instead of dmem_read(). this becomes especially important
+# if the fetch runs into some deadly fault.
+dimmed:
+	addq.l		&0x4,EXC_EXTWPTR(%a6)
+	bsr.l		_imem_read_long		# read immediate value
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.l		%d0,%d7
+	bra.w		dgotsrcl
+
+##########
+
+# if dmem_read_long() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# also, we call isp_restore in case the effective addressing mode was
+# (an)+ or -(an) in which case the previous "an" value must be restored.
+# FSLW:
+#	read = true
+#	size = longword
+#	TM = data
+#	software emulation error = true
+div64_err:
+	bsr.l		isp_restore		# restore addr reg
+	mov.l		%a2,%a0			# pass failing address
+	mov.l		&0x01010001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_mul64(): routine to emulate mul{u,s}.l <ea>,Dh:Dl 32x32->64	#
+#									#
+# XREF ****************************************************************	#
+#	_calc_ea() - calculate effective address			#
+#	isp_iacc() - handle instruction access error exception		#
+#	isp_dacc() - handle data access error exception			#
+#	isp_restore() - restore An on access error w/ -() or ()+	#
+#									#
+# INPUT ***************************************************************	#
+#	none								#
+#									#
+# OUTPUT **************************************************************	#
+#	If exiting through isp_dacc...					#
+#		a0 = failing address					#
+#		d0 = FSLW						#
+#	else								#
+#		none							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	First, decode the operand location. If it's in Dn, fetch from	#
+# the stack. If it's in memory, use _calc_ea() to calculate the		#
+# effective address. Use _dmem_read_long() to fetch at that address.	#
+# Unless the operand is immediate data. Then use _imem_read_long().	#
+# Send failures to isp_dacc() or isp_iacc() as appropriate.		#
+#	If the operands are signed, make them unsigned and save the	#
+# sign info for later. Perform the multiplication using 16x16->32	#
+# unsigned multiplies and "add" instructions. Store the high and low	#
+# portions of the result in the appropriate data registers on the	#
+# stack. Calculate the condition codes, also.				#
+#									#
+#########################################################################
+
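+# The algorithm below builds the product from four 16x16->32 partial
+# products, as in this rough C sketch (illustrative only):
+#
+#	uint32_t al = a & 0xffff, ah = a >> 16;
+#	uint32_t bl = b & 0xffff, bh = b >> 16;
+#	uint64_t r = (uint64_t)(al*bl)              /* [1] */
+#	           + ((uint64_t)(ah*bl) << 16)      /* [2] */
+#	           + ((uint64_t)(al*bh) << 16)      /* [3] */
+#	           + ((uint64_t)(ah*bh) << 32);     /* [4] */
+#	/* hi(result) = r >> 32; lo(result) = (uint32_t)r */
+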
+#############
+# mul(u,s)l #
+#############
+	global		_mul64
+_mul64:
+	mov.b		EXC_OPWORD+1(%a6), %d0	# extract src {mode,reg}
+	cmpi.b		%d0, &0x7		# is src mode Dn or other?
+	bgt.w		mul64_memop		# src is in memory
+
+# multiplier operand in the data register file.
+# must extract the register number and fetch the operand from the stack.
+mul64_regop:
+	andi.w		&0x7, %d0		# extract Dn
+	mov.l		(EXC_DREGS,%a6,%d0.w*4), %d3 # fetch multiplier
+
+# multiplier is in %d3. now, extract Dl and Dh fields and fetch the
+# multiplicand from the data register specified by Dl.
+mul64_multiplicand:
+	mov.w		EXC_EXTWORD(%a6), %d2	# fetch ext word
+	clr.w		%d1			# clear Dh reg
+	mov.b		%d2, %d1		# grab Dh
+	rol.w		&0x4, %d2		# align Dl byte
+	andi.w		&0x7, %d2		# extract Dl
+
+	mov.l		(EXC_DREGS,%a6,%d2.w*4), %d4 # get multiplicand
+
+# check for the case of "zero" result early
+	tst.l		%d4			# test multiplicand
+	beq.w		mul64_zero		# handle zero separately
+	tst.l		%d3			# test multiplier
+	beq.w		mul64_zero		# handle zero separately
+
+# multiplier is in %d3 and multiplicand is in %d4.
+# if the operation is to be signed, then the operands are converted
+# to unsigned and the result sign is saved for the end.
+	clr.b		EXC_TEMP(%a6)		# clear temp space
+	btst		&0x3, EXC_EXTWORD(%a6)	# signed or unsigned?
+	beq.b		mul64_alg		# unsigned; skip sgn calc
+
+	tst.l		%d3			# is multiplier negative?
+	bge.b		mul64_chk_md_sgn	# no
+	neg.l		%d3			# make multiplier positive
+	ori.b		&0x1, EXC_TEMP(%a6)	# save multiplier sgn
+
+# the result sign is the exclusive or of the operand sign bits.
+mul64_chk_md_sgn:
+	tst.l		%d4			# is multiplicand negative?
+	bge.b		mul64_alg		# no
+	neg.l		%d4			# make multiplicand positive
+	eori.b		&0x1, EXC_TEMP(%a6)	# calculate correct sign
+
+#########################################################################
+#	63			   32				0	#
+#	----------------------------					#
+#	| hi(mplier) * hi(mplicand)|					#
+#	----------------------------					#
+#		     -----------------------------			#
+#		     | hi(mplier) * lo(mplicand) |			#
+#		     -----------------------------			#
+#		     -----------------------------			#
+#		     | lo(mplier) * hi(mplicand) |			#
+#		     -----------------------------			#
+#	  |			   -----------------------------	#
+#	--|--			   | lo(mplier) * lo(mplicand) |	#
+#	  |			   -----------------------------	#
+#	========================================================	#
+#	--------------------------------------------------------	#
+#	|	hi(result)	   |	    lo(result)         |	#
+#	--------------------------------------------------------	#
+#########################################################################
+mul64_alg:
+# load temp registers with operands
+	mov.l		%d3, %d5		# mr in %d5
+	mov.l		%d3, %d6		# mr in %d6
+	mov.l		%d4, %d7		# md in %d7
+	swap		%d6			# hi(mr) in lo %d6
+	swap		%d7			# hi(md) in lo %d7
+
+# complete necessary multiplies:
+	mulu.w		%d4, %d3		# [1] lo(mr) * lo(md)
+	mulu.w		%d6, %d4		# [2] hi(mr) * lo(md)
+	mulu.w		%d7, %d5		# [3] lo(mr) * hi(md)
+	mulu.w		%d7, %d6		# [4] hi(mr) * hi(md)
+
+# add lo portions of [2],[3] to hi portion of [1].
+# add carries produced from these adds to [4].
+# lo([1]) is the final lo 16 bits of the result.
+	clr.l		%d7			# load %d7 w/ zero value
+	swap		%d3			# hi([1]) <==> lo([1])
+	add.w		%d4, %d3		# hi([1]) + lo([2])
+	addx.l		%d7, %d6		#    [4]  + carry
+	add.w		%d5, %d3		# hi([1]) + lo([3])
+	addx.l		%d7, %d6		#    [4]  + carry
+	swap		%d3			# lo([1]) <==> hi([1])
+
+# lo portions of [2],[3] have been added in to final result.
+# now, clear lo, put hi in lo reg, and add to [4]
+	clr.w		%d4			# clear lo([2])
+	clr.w		%d5			# clear hi([3])
+	swap		%d4			# hi([2]) in lo %d4
+	swap		%d5			# hi([3]) in lo %d5
+	add.l		%d5, %d4		#    [4]  + hi([2])
+	add.l		%d6, %d4		#    [4]  + hi([3])
+
+# unsigned result is now in {%d4,%d3}
+	tst.b		EXC_TEMP(%a6)		# should result be signed?
+	beq.b		mul64_done		# no
+
+# result should be a signed negative number.
+# compute 2's complement of the unsigned number:
+#   -negate all bits and add 1
+mul64_neg:
+	not.l		%d3			# negate lo(result) bits
+	not.l		%d4			# negate hi(result) bits
+	addq.l		&1, %d3			# add 1 to lo(result)
+	addx.l		%d7, %d4		# add carry to hi(result)
+
+# the result is saved to the register file.
+# for '040 compatibility, if Dl == Dh then only the hi(result) is
+# saved. so, saving hi after lo accomplishes this without need to
+# check Dl,Dh equality.
+mul64_done:
+	mov.l		%d3, (EXC_DREGS,%a6,%d2.w*4) # save lo(result)
+	mov.w		&0x0, %cc
+	mov.l		%d4, (EXC_DREGS,%a6,%d1.w*4) # save hi(result)
+
+# now, grab the condition codes. the only one that can be set is 'N'.
+# 'N' can be set even for an unsigned operation, if bit 63 is set.
+	mov.w		%cc, %d7		# fetch %ccr to see if 'N' set
+	andi.b		&0x8, %d7		# extract 'N' bit
+
+mul64_ccode_set:
+	mov.b		EXC_CC+1(%a6), %d6	# fetch previous %ccr
+	andi.b		&0x10, %d6		# all but 'X' bit changes
+
+	or.b		%d7, %d6		# group 'X' and 'N'
+	mov.b		%d6, EXC_CC+1(%a6)	# save new %ccr
+
+	rts
+
+# one or both of the operands is zero so the result is also zero.
+# save the zero result to the register file and set the 'Z' ccode bit.
+mul64_zero:
+	clr.l		(EXC_DREGS,%a6,%d2.w*4) # save lo(result)
+	clr.l		(EXC_DREGS,%a6,%d1.w*4) # save hi(result)
+
+	movq.l		&0x4, %d7		# set 'Z' ccode bit
+	bra.b		mul64_ccode_set		# finish ccode set
+
+##########
+
+# multiplier operand is in memory at the effective address.
+# must calculate the <ea> and go fetch the 32-bit operand.
+mul64_memop:
+	movq.l		&LONG, %d0		# pass # of bytes
+	bsr.l		_calc_ea		# calculate <ea>
+
+	cmpi.b		SPCOND_FLG(%a6),&immed_flg # immediate addressing mode?
+	beq.b		mul64_immed		# yes
+
+	mov.l		%a0,%a2
+	bsr.l		_dmem_read_long		# fetch src from addr (%a0)
+
+	tst.l		%d1			# dfetch error?
+	bne.w		mul64_err		# yes
+
+	mov.l		%d0, %d3		# store multiplier in %d3
+
+	bra.w		mul64_multiplicand
+
+# we have to split out immediate data here because it must be read using
+# imem_read() instead of dmem_read(). this becomes especially important
+# if the fetch runs into some deadly fault.
+mul64_immed:
+	addq.l		&0x4,EXC_EXTWPTR(%a6)
+	bsr.l		_imem_read_long		# read immediate value
+
+	tst.l		%d1			# ifetch error?
+	bne.l		isp_iacc		# yes
+
+	mov.l		%d0,%d3
+	bra.w		mul64_multiplicand
+
+##########
+
+# if dmem_read_long() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# also, we call isp_restore in case the effective addressing mode was
+# (an)+ or -(an) in which case the previous "an" value must be restored.
+# FSLW:
+#	read = true
+#	size = longword
+#	TM = data
+#	software emulation error = true
+mul64_err:
+	bsr.l		isp_restore		# restore addr reg
+	mov.l		%a2,%a0			# pass failing address
+	mov.l		&0x01010001,%d0		# pass fslw
+	bra.l		isp_dacc
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_compandset2(): routine to emulate cas2()			#
+#			(internal to package)				#
+#									#
+#	_isp_cas2_finish(): store ccodes, store compare regs		#
+#			    (external to package)			#
+#									#
+# XREF ****************************************************************	#
+#	_real_lock_page() - "callout" to lock op's page from page-outs	#
+#	_cas_terminate2() - access error exit				#
+#	_real_cas2() - "callout" to core cas2 emulation code		#
+#	_real_unlock_page() - "callout" to unlock page			#
+#									#
+# INPUT ***************************************************************	#
+# _compandset2():							#
+#	d0 = instruction extension word					#
+#									#
+# _isp_cas2_finish():							#
+#	see cas2 core emulation code					#
+#									#
+# OUTPUT **************************************************************	#
+# _compandset2():							#
+#	see cas2 core emulation code					#
+#									#
+# _isp_cas2_finish():							#
+#	None (register file or memory changed as appropriate)		#
+#									#
+# ALGORITHM ***********************************************************	#
+# compandset2():							#
+#	Decode the instruction and fetch the appropriate Update and	#
+# Compare operands. Then call the "callout" _real_lock_page() for each	#
+# memory operand address so that the operating system can keep these	#
+# pages from being paged out. If either _real_lock_page() fails, exit	#
+# through _cas_terminate2(). Don't forget to unlock the 1st locked page	#
+# using _real_unlock_page() if the 2nd lock-page fails.		#
+# Finally, branch to the core cas2 emulation code by calling the	#
+# "callout" _real_cas2().						#
+#									#
+# _isp_cas2_finish():							#
+#	Re-perform the comparison so we can determine the condition	#
+# codes which were too much trouble to keep around during the locked	#
+# emulation. Then unlock each operand's page by calling the "callout"	#
+# _real_unlock_page().							#
+#									#
+#########################################################################
+
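+# Semantically, cas2 performs the following as one atomic unit (rough
+# C sketch; rn1/rn2 point at the two memory operands):
+#
+#	if (*rn1 == dc1 && *rn2 == dc2) {
+#		*rn1 = du1;         /* both compares matched: store  */
+#		*rn2 = du2;         /* both update operands          */
+#	} else {
+#		dc1 = *rn1;         /* otherwise load the memory     */
+#		dc2 = *rn2;         /* operands into the compare     */
+#	}                           /* registers                     */
+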
+set ADDR1,	EXC_TEMP+0xc
+set ADDR2,	EXC_TEMP+0x0
+set DC2,	EXC_TEMP+0xa
+set DC1,	EXC_TEMP+0x8
+
+	global		_compandset2
+_compandset2:
+	mov.l		%d0,EXC_TEMP+0x4(%a6)		# store for possible restart
+	mov.l		%d0,%d1			# extension word in d0
+
+	rol.w		&0x4,%d0
+	andi.w		&0xf,%d0		# extract Rn2
+	mov.l		(EXC_DREGS,%a6,%d0.w*4),%a1 # fetch ADDR2
+	mov.l		%a1,ADDR2(%a6)
+
+	mov.l		%d1,%d0
+
+	lsr.w		&0x6,%d1
+	andi.w		&0x7,%d1		# extract Du2
+	mov.l		(EXC_DREGS,%a6,%d1.w*4),%d5 # fetch Update2 Op
+
+	andi.w		&0x7,%d0		# extract Dc2
+	mov.l		(EXC_DREGS,%a6,%d0.w*4),%d3 # fetch Compare2 Op
+	mov.w		%d0,DC2(%a6)
+
+	mov.w		EXC_EXTWORD(%a6),%d0
+	mov.l		%d0,%d1
+
+	rol.w		&0x4,%d0
+	andi.w		&0xf,%d0		# extract Rn1
+	mov.l		(EXC_DREGS,%a6,%d0.w*4),%a0 # fetch ADDR1
+	mov.l		%a0,ADDR1(%a6)
+
+	mov.l		%d1,%d0
+
+	lsr.w		&0x6,%d1
+	andi.w		&0x7,%d1		# extract Du1
+	mov.l		(EXC_DREGS,%a6,%d1.w*4),%d4 # fetch Update1 Op
+
+	andi.w		&0x7,%d0		# extract Dc1
+	mov.l		(EXC_DREGS,%a6,%d0.w*4),%d2 # fetch Compare1 Op
+	mov.w		%d0,DC1(%a6)
+
+	btst		&0x1,EXC_OPWORD(%a6)	# word or long?
+	sne		%d7
+
+	btst		&0x5,EXC_ISR(%a6)	# user or supervisor?
+	sne		%d6
+
+	mov.l		%a0,%a2
+	mov.l		%a1,%a3
+
+	mov.l		%d7,%d1			# pass size
+	mov.l		%d6,%d0			# pass mode
+	bsr.l		_real_lock_page		# lock page
+	mov.l		%a2,%a0
+	tst.l		%d0			# error?
+	bne.l		_cas_terminate2		# yes
+
+	mov.l		%d7,%d1			# pass size
+	mov.l		%d6,%d0			# pass mode
+	mov.l		%a3,%a0			# pass addr
+	bsr.l		_real_lock_page		# lock page
+	mov.l		%a3,%a0
+	tst.l		%d0			# error?
+	bne.b		cas_preterm		# yes
+
+	mov.l		%a2,%a0
+	mov.l		%a3,%a1
+
+	bra.l		_real_cas2
+
+# if the 2nd lock attempt fails, then we must still unlock the
+# first page(s).
+cas_preterm:
+	mov.l		%d0,-(%sp)		# save FSLW
+	mov.l		%d7,%d1			# pass size
+	mov.l		%d6,%d0			# pass mode
+	mov.l		%a2,%a0			# pass ADDR1
+	bsr.l		_real_unlock_page	# unlock first page(s)
+	mov.l		(%sp)+,%d0		# restore FSLW
+	mov.l		%a3,%a0			# pass failing addr
+	bra.l		_cas_terminate2
+
+#############################################################
+
+	global		_isp_cas2_finish
+_isp_cas2_finish:
+	btst		&0x1,EXC_OPWORD(%a6)
+	bne.b		cas2_finish_l
+
+	mov.w		EXC_CC(%a6),%cc		# load old ccodes
+	cmp.w		%d0,%d2
+	bne.b		cas2_finish_w_save
+	cmp.w		%d1,%d3
+cas2_finish_w_save:
+	mov.w		%cc,EXC_CC(%a6)		# save new ccodes
+
+	tst.b		%d4			# update compare reg?
+	bne.b		cas2_finish_w_done	# no
+
+	mov.w		DC2(%a6),%d3		# fetch Dc2
+	mov.w		%d1,(2+EXC_DREGS,%a6,%d3.w*4) # store new Compare2 Op
+
+	mov.w		DC1(%a6),%d2		# fetch Dc1
+	mov.w		%d0,(2+EXC_DREGS,%a6,%d2.w*4) # store new Compare1 Op
+
+cas2_finish_w_done:
+	btst		&0x5,EXC_ISR(%a6)
+	sne		%d2
+	mov.l		%d2,%d0			# pass mode
+	sf		%d1			# pass size
+	mov.l		ADDR1(%a6),%a0		# pass ADDR1
+	bsr.l		_real_unlock_page	# unlock page
+
+	mov.l		%d2,%d0			# pass mode
+	sf		%d1			# pass size
+	mov.l		ADDR2(%a6),%a0		# pass ADDR2
+	bsr.l		_real_unlock_page	# unlock page
+	rts
+
+cas2_finish_l:
+	mov.w		EXC_CC(%a6),%cc		# load old ccodes
+	cmp.l		%d0,%d2
+	bne.b		cas2_finish_l_save
+	cmp.l		%d1,%d3
+cas2_finish_l_save:
+	mov.w		%cc,EXC_CC(%a6)		# save new ccodes
+
+	tst.b		%d4			# update compare reg?
+	bne.b		cas2_finish_l_done	# no
+
+	mov.w		DC2(%a6),%d3		# fetch Dc2
+	mov.l		%d1,(EXC_DREGS,%a6,%d3.w*4) # store new Compare2 Op
+
+	mov.w		DC1(%a6),%d2		# fetch Dc1
+	mov.l		%d0,(EXC_DREGS,%a6,%d2.w*4) # store new Compare1 Op
+
+cas2_finish_l_done:
+	btst		&0x5,EXC_ISR(%a6)
+	sne		%d2
+	mov.l		%d2,%d0			# pass mode
+	st		%d1			# pass size
+	mov.l		ADDR1(%a6),%a0		# pass ADDR1
+	bsr.l		_real_unlock_page	# unlock page
+
+	mov.l		%d2,%d0			# pass mode
+	st		%d1			# pass size
+	mov.l		ADDR2(%a6),%a0		# pass ADDR2
+	bsr.l		_real_unlock_page	# unlock page
+	rts
+
+########
+	global		cr_cas2
+cr_cas2:
+	mov.l		EXC_TEMP+0x4(%a6),%d0
+	bra.w		_compandset2
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_compandset(): routine to emulate cas w/ misaligned <ea>	#
+#		       (internal to package)				#
+#	_isp_cas_finish(): routine called when cas emulation completes	#
+#			   (external and internal to package)		#
+#	_isp_cas_restart(): restart cas emulation after a fault		#
+#			    (external to package)			#
+#	_isp_cas_terminate(): create access error stack frame on fault	#
+#			      (external and internal to package)	#
+#	_isp_cas_inrange(): checks whether instr address is within range	#
+#			    of core cas/cas2 emulation code		#
+#			    (external to package)			#
+#									#
+# XREF ****************************************************************	#
+#	_calc_ea(): calculate effective address				#
+#									#
+# INPUT ***************************************************************	#
+# compandset():								#
+#	none								#
+# _isp_cas_restart():							#
+#	d6 = previous sfc/dfc						#
+# _isp_cas_finish():							#
+# _isp_cas_terminate():							#
+#	a0 = failing address						#
+#	d0 = FSLW							#
+#	d6 = previous sfc/dfc						#
+# _isp_cas_inrange():							#
+#	a0 = instruction address to be checked				#
+#									#
+# OUTPUT **************************************************************	#
+# compandset():								#
+#		none							#
+# _isp_cas_restart():							#
+#	a0 = effective address						#
+#	d7 = word or longword flag					#
+# _isp_cas_finish():							#
+#	a0 = effective address						#
+# _isp_cas_terminate():							#
+#	initial register set before emulation exception			#
+# _isp_cas_inrange():							#
+#	d0 = 0 => in range; -1 => out of range				#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+# compandset():								#
+#	First, calculate the effective address. Then, decode the	#
+# instruction word and fetch the "compare" (DC) and "update" (Du)	#
+# operands.								#
+#	Next, call the external routine _real_lock_page() so that the	#
+# operating system can keep this page from being paged out while we're	#
+# in this routine. If this call fails, jump to _cas_terminate2().	#
+#	The routine then branches to _real_cas(). This external routine	#
+# that actually emulates cas can be supplied by the external os or	#
+# made to point directly back into the 060ISP which has a routine for	#
+# this purpose.								#
+#									#
+# _isp_cas_finish():							#
+#	Either way, after emulation, the package is re-entered at	#
+# _isp_cas_finish(). This routine re-compares the operands in order to	#
+# set the condition codes. Finally, these routines will call		#
+# _real_unlock_page() in order to unlock the pages that were previously	#
+# locked.								#
+#									#
+# _isp_cas_restart():							#
+#	This routine can be entered from an access error handler where	#
+# the emulation sequence should be re-started from the beginning.	#
+#									#
+# _isp_cas_terminate():							#
+#	This routine can be entered from an access error handler where	#
+# an emulation operand access failed and the operating system would	#
+# like an access error stack frame created instead of the current	#
+# unimplemented integer instruction frame.				#
+#	Also, the package enters here if a call to _real_lock_page()	#
+# fails.								#
+#									#
+# _isp_cas_inrange():							#
+#	Checks to see whether the instruction address passed to it in	#
+# a0 is within the software package cas/cas2 emulation routines. This	#
+# can be helpful for an operating system to determine whether an access	#
+# error during emulation was due to a cas/cas2 emulation access.	#
+#									#
+#########################################################################
+
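+# The overall flow for an emulated cas, in a rough C-style sketch
+# (the names are shorthand for the callouts described above):
+#
+#	ea = calc_ea();
+#	if (real_lock_page(ea))         /* page must stay resident   */
+#		cas_terminate2();       /* access error frame        */
+#	real_cas(ea);                   /* locked core emulation;    */
+#	                                /* re-enters at cas_finish:  */
+#	recompare(*ea, dc);             /* recompute the ccodes      */
+#	if (*ea != dc) Dc = *ea;        /* miss: dest -> compare reg */
+#	real_unlock_page(ea);
+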
+set DC,		EXC_TEMP+0x8
+set ADDR,	EXC_TEMP+0x4
+
+	global		_compandset
+_compandset:
+	btst		&0x1,EXC_OPWORD(%a6)	# word or long operation?
+	bne.b		compandsetl		# long
+
+compandsetw:
+	movq.l		&0x2,%d0		# size = 2 bytes
+	bsr.l		_calc_ea		# a0 = calculated <ea>
+	mov.l		%a0,ADDR(%a6)		# save <ea> for possible restart
+	sf		%d7			# clear d7 for word size
+	bra.b		compandsetfetch
+
+compandsetl:
+	movq.l		&0x4,%d0		# size = 4 bytes
+	bsr.l		_calc_ea		# a0 = calculated <ea>
+	mov.l		%a0,ADDR(%a6)		# save <ea> for possible restart
+	st		%d7			# set d7 for longword size
+
+compandsetfetch:
+	mov.w		EXC_EXTWORD(%a6),%d0	# fetch cas extension word
+	mov.l		%d0,%d1			# make a copy
+
+	lsr.w		&0x6,%d0
+	andi.w		&0x7,%d0		# extract Du
+	mov.l		(EXC_DREGS,%a6,%d0.w*4),%d2 # get update operand
+
+	andi.w		&0x7,%d1		# extract Dc
+	mov.l		(EXC_DREGS,%a6,%d1.w*4),%d4 # get compare operand
+	mov.w		%d1,DC(%a6)		# save Dc
+
+	btst		&0x5,EXC_ISR(%a6)	# which mode for exception?
+	sne		%d6			# set on supervisor mode
+
+	mov.l		%a0,%a2			# save temporarily
+	mov.l		%d7,%d1			# pass size
+	mov.l		%d6,%d0			# pass mode
+	bsr.l		_real_lock_page		# lock page
+	tst.l		%d0			# did error occur?
+	bne.w		_cas_terminate2		# yes, clean up the mess
+	mov.l		%a2,%a0			# pass addr in a0
+
+	bra.l		_real_cas
+
+########
+	global		_isp_cas_finish
+_isp_cas_finish:
+	btst		&0x1,EXC_OPWORD(%a6)
+	bne.b		cas_finish_l
+
+# just do the compare again since it's faster than saving the ccodes
+# from the locked routine...
+cas_finish_w:
+	mov.w		EXC_CC(%a6),%cc		# restore cc
+	cmp.w		%d0,%d4			# do word compare
+	mov.w		%cc,EXC_CC(%a6)		# save cc
+
+	tst.b		%d1			# update compare reg?
+	bne.b		cas_finish_w_done	# no
+
+	mov.w		DC(%a6),%d3
+	mov.w		%d0,(EXC_DREGS+2,%a6,%d3.w*4) # Dc = destination
+
+cas_finish_w_done:
+	mov.l		ADDR(%a6),%a0		# pass addr
+	sf		%d1			# pass size
+	btst		&0x5,EXC_ISR(%a6)
+	sne		%d0			# pass mode
+	bsr.l		_real_unlock_page	# unlock page
+	rts
+
+# just do the compare again since it's faster than saving the ccodes
+# from the locked routine...
+cas_finish_l:
+	mov.w		EXC_CC(%a6),%cc		# restore cc
+	cmp.l		%d0,%d4			# do longword compare
+	mov.w		%cc,EXC_CC(%a6)		# save cc
+
+	tst.b		%d1			# update compare reg?
+	bne.b		cas_finish_l_done	# no
+
+	mov.w		DC(%a6),%d3
+	mov.l		%d0,(EXC_DREGS,%a6,%d3.w*4) # Dc = destination
+
+cas_finish_l_done:
+	mov.l		ADDR(%a6),%a0		# pass addr
+	st		%d1			# pass size
+	btst		&0x5,EXC_ISR(%a6)
+	sne		%d0			# pass mode
+	bsr.l		_real_unlock_page	# unlock page
+	rts
+
+########
+
+	global		_isp_cas_restart
+_isp_cas_restart:
+	mov.l		%d6,%sfc		# restore previous sfc
+	mov.l		%d6,%dfc		# restore previous dfc
+
+	cmpi.b		EXC_OPWORD+1(%a6),&0xfc	# cas or cas2?
+	beq.l		cr_cas2			# cas2
+cr_cas:
+	mov.l		ADDR(%a6),%a0		# load <ea>
+	btst		&0x1,EXC_OPWORD(%a6)	# word or long operation?
+	sne		%d7			# set d7 accordingly
+	bra.w		compandsetfetch
+
+########
+
+# At this stage, it would be nice if d0 held the FSLW.
+	global		_isp_cas_terminate
+_isp_cas_terminate:
+	mov.l		%d6,%sfc		# restore previous sfc
+	mov.l		%d6,%dfc		# restore previous dfc
+
+	global		_cas_terminate2
+_cas_terminate2:
+	mov.l		%a0,%a2			# copy failing addr to a2
+
+	mov.l		%d0,-(%sp)
+	bsr.l		isp_restore		# restore An (if ()+ or -())
+	mov.l		(%sp)+,%d0
+
+	addq.l		&0x4,%sp		# remove sub return addr
+	subq.l		&0x8,%sp		# make room for bigger stack
+	subq.l		&0x8,%a6		# shift frame ptr down, too
+	mov.l		&26,%d1			# want to move 27 longwords
+	lea		0x8(%sp),%a0		# get address of old stack
+	lea		0x0(%sp),%a1		# get address of new stack
+cas_term_cont:
+	mov.l		(%a0)+,(%a1)+		# move a longword
+	dbra.w		%d1,cas_term_cont	# keep going
+
+	mov.w		&0x4008,EXC_IVOFF(%a6)	# put new stk fmt, voff
+	mov.l		%a2,EXC_IVOFF+0x2(%a6)	# put faulting addr on stack
+	mov.l		%d0,EXC_IVOFF+0x6(%a6)	# put FSLW on stack
+	movm.l		EXC_DREGS(%a6),&0x3fff	# restore user regs
+	unlk		%a6			# unlink stack frame
+	bra.l		_real_access
+
+########
+
+	global		_isp_cas_inrange
+_isp_cas_inrange:
+	clr.l		%d0			# clear return result
+	lea		_CASHI(%pc),%a1		# load end of CAS core code
+	cmp.l		%a1,%a0			# is PC in range?
+	blt.b		cin_no			# no
+	lea		_CASLO(%pc),%a1		# load begin of CAS core code
+	cmp.l		%a0,%a1			# is PC in range?
+	blt.b		cin_no			# no
+	rts					# yes; return d0 = 0
+cin_no:
+	mov.l		&-0x1,%d0		# out of range; return d0 = -1
+	rts
+
+#################################################################
+#################################################################
+#################################################################
+# This is the start of the cas and cas2 "core" emulation code.	#
+# This is the section that may need to be replaced by the host	#
+# OS if it is too operating system-specific.			#
+# Please refer to the package documentation to see how to	#
+# "replace" this section, if necessary.				#
+#################################################################
+#################################################################
+#################################################################
+
+#       ######      ##      ######     ####
+#       #	   #  #     #         #    #
+#	#	  ######    ######        #
+#	#	  #    #         #      #
+#       ######    #    #    ######    ######
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_isp_cas2(): "core" emulation code for the cas2 instruction	#
+#									#
+# XREF ****************************************************************	#
+#	_isp_cas2_finish() - only exit point for this emulation code;	#
+#			     do clean-up; calculate ccodes; store	#
+#			     Compare Ops if appropriate.		#
+#									#
+# INPUT ***************************************************************	#
+#	*see chart below*						#
+#									#
+# OUTPUT **************************************************************	#
+#	*see chart below*						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	(1) Make several copies of the effective address.		#
+#	(2) Save current SR; Then mask off all maskable interrupts.	#
+#	(3) Save current SFC/DFC (ASSUMED TO BE EQUAL!!!); Then set	#
+#	    according to whether exception occurred in user or		#
+#	    supervisor mode.						#
+#	(4) Use "plpaw" instruction to pre-load ATC with effective	#
+#	    address page(s). THIS SHOULD NOT FAULT!!! The relevant	#
+#	    page(s) should have already been made resident prior to	#
+#	    entering this routine.					#
+#	(5) Push the operand lines from the cache w/ "cpushl".		#
+#	    In the 68040, this was done within the locked region. In	#
+#	    the 68060, it is done outside of the locked region.		#
+#	(6) Use "plpar" instruction to do a re-load of ATC entries for	#
+#	    ADDR1 since ADDR2 entries may have pushed ADDR1 out of the	#
+#	    ATC.							#
+#	(7) Pre-fetch the core emulation instructions by executing	#
+#	    one branch within each physical line (16 bytes) of the code	#
+#	    before actually executing the code.				#
+#	(8) Load the BUSCR w/ the bus lock value.			#
+#	(9) Fetch the source operands using "moves".			#
+#	(10)Do the compares. If both equal, go to step (13).		#
+#	(11)Unequal. No update occurs. But, we do write the DST1 op	#
+#	    back to itself (as w/ the '040) so we can gracefully unlock	#
+#	    the bus (and assert LOCKE*) using BUSCR and the final move.	#
+#	(12)Exit.							#
+#	(13)Write update operand to the DST locations. Use BUSCR to	#
+#	    assert LOCKE* for the final write operation.		#
+#	(14)Exit.							#
+#									#
+#	The algorithm is actually implemented slightly differently	#
+# depending on the size of the operation and the misalignment of the	#
+# operands. A misaligned operand must be written in aligned chunks or	#
+# else the BUSCR register control gets confused.			#
+#									#
+#########################################################################
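+
+# for reference, a commented sketch (not from the original sources) of
+# the user-visible instruction whose semantics are emulated below:
+#
+#	cas2.l	%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+#
+# if ((%a0) == %d1 && (%a1) == %d2), both memory operands are updated
+# ((%a0) = %d3, (%a1) = %d4); otherwise the fetched memory values are
+# loaded into the compare registers %d1/%d2. the code below must make
+# this dual-address read-modify-write appear as one locked bus
+# transaction.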
+
+#################################################################
+# THIS IS THE STATE OF THE INTEGER REGISTER FILE UPON		#
+# ENTERING _isp_cas2().						#
+#								#
+# D0 = xxxxxxxx							#
+# D1 = xxxxxxxx							#
+# D2 = cmp operand 1						#
+# D3 = cmp operand 2						#
+# D4 = update oper 1						#
+# D5 = update oper 2						#
+# D6 = 'xxxxxxff if supervisor mode; 'xxxxxx00 if user mode	#
+# D7 = 'xxxxxxff if longword operation; 'xxxxxx00 if word	#
+# A0 = ADDR1							#
+# A1 = ADDR2							#
+# A2 = xxxxxxxx							#
+# A3 = xxxxxxxx							#
+# A4 = xxxxxxxx							#
+# A5 = xxxxxxxx							#
+# A6 = frame pointer						#
+# A7 = stack pointer						#
+#################################################################
+
+#	align		0x1000
+# beginning label used by _isp_cas_inrange()
+	global		_CASLO
+_CASLO:
+
+	global		_isp_cas2
+_isp_cas2:
+	tst.b		%d6			# user or supervisor mode?
+	bne.b		cas2_supervisor		# supervisor
+cas2_user:
+	movq.l		&0x1,%d0		# load user data fc
+	bra.b		cas2_cont
+cas2_supervisor:
+	movq.l		&0x5,%d0		# load supervisor data fc
+cas2_cont:
+	tst.b		%d7			# word or longword?
+	beq.w		cas2w			# word
+
+####
+cas2l:
+	mov.l		%a0,%a2			# copy ADDR1
+	mov.l		%a1,%a3			# copy ADDR2
+	mov.l		%a0,%a4			# copy ADDR1
+	mov.l		%a1,%a5			# copy ADDR2
+
+	addq.l		&0x3,%a4		# ADDR1+3
+	addq.l		&0x3,%a5		# ADDR2+3
+	mov.l		%a2,%d1			# ADDR1
+
+# mask interrupt levels 0-6. save old mask value.
+	mov.w		%sr,%d7			# save current SR
+	ori.w		&0x0700,%sr		# inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+	movc		%sfc,%d6		# save old SFC/DFC
+	movc		%d0,%sfc		# store new SFC
+	movc		%d0,%dfc		# store new DFC
+
+# pre-load the operand ATC. no page faults should occur here because
+# _real_lock_page() should have taken care of this.
+	plpaw		(%a2)			# load atc for ADDR1
+	plpaw		(%a4)			# load atc for ADDR1+3
+	plpaw		(%a3)			# load atc for ADDR2
+	plpaw		(%a5)			# load atc for ADDR2+3
+
+# push the operand lines from the cache if they exist.
+	cpushl		%dc,(%a2)		# push line for ADDR1
+	cpushl		%dc,(%a4)		# push line for ADDR1+3
+	cpushl		%dc,(%a3)		# push line for ADDR2
+	cpushl		%dc,(%a5)		# push line for ADDR2+3
+
+	mov.l		%d1,%a2			# ADDR1
+	addq.l		&0x3,%d1
+	mov.l		%d1,%a4			# ADDR1+3
+# if ADDR1 was ATC resident before the above "plpaw"s were executed,
+# and it was the next entry scheduled for replacement, and ADDR2
+# shares the same set, then the "plpaw" for ADDR2 can push the ADDR1
+# entries from the ATC. so, we do a second set of "plpa"s.
+	plpar		(%a2)			# load atc for ADDR1
+	plpar		(%a4)			# load atc for ADDR1+3
+
+# load the BUSCR values.
+	mov.l		&0x80000000,%a2		# assert LOCK* buscr value
+	mov.l		&0xa0000000,%a3		# assert LOCKE* buscr value
+	mov.l		&0x00000000,%a4		# buscr unlock value
+
+# there are three possible alignment cases for longword cas. they
+# are separated because the final write which asserts LOCKE* must
+# be aligned.
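+# (ADDR1 & 0x3 == 0x2 is the word-misaligned case; 0x1 or 0x3 is the
+# byte-misaligned case. the dispatch below tests exactly this.)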
+	mov.l		%a0,%d0			# is ADDR1 misaligned?
+	andi.b		&0x3,%d0
+	beq.b		CAS2L_ENTER		# no
+	cmpi.b		%d0,&0x2
+	beq.w		CAS2L2_ENTER		# yes; word misaligned
+	bra.w		CAS2L3_ENTER		# yes; byte misaligned
+
+#
+# D0 = dst operand 1 <-
+# D1 = dst operand 2 <-
+# D2 = cmp operand 1
+# D3 = cmp operand 2
+# D4 = update oper 1
+# D5 = update oper 2
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR1
+# A1 = ADDR2
+# A2 = bus LOCK*  value
+# A3 = bus LOCKE* value
+# A4 = bus unlock value
+# A5 = xxxxxxxx
+#
+	align		0x10
+CAS2L_START:
+	movc		%a2,%buscr		# assert LOCK*
+	movs.l		(%a1),%d1		# fetch Dest2[31:0]
+	movs.l		(%a0),%d0		# fetch Dest1[31:0]
+	bra.b		CAS2L_CONT
+CAS2L_ENTER:
+	bra.b		~+16
+
+CAS2L_CONT:
+	cmp.l		%d0,%d2			# Dest1 - Compare1
+	bne.b		CAS2L_NOUPDATE
+	cmp.l		%d1,%d3			# Dest2 - Compare2
+	bne.b		CAS2L_NOUPDATE
+	movs.l		%d5,(%a1)		# Update2[31:0] -> DEST2
+	bra.b		CAS2L_UPDATE
+	bra.b		~+16
+
+CAS2L_UPDATE:
+	movc		%a3,%buscr		# assert LOCKE*
+	movs.l		%d4,(%a0)		# Update1[31:0] -> DEST1
+	movc		%a4,%buscr		# unlock the bus
+	bra.b		cas2l_update_done
+	bra.b		~+16
+
+CAS2L_NOUPDATE:
+	movc		%a3,%buscr		# assert LOCKE*
+	movs.l		%d0,(%a0)		# Dest1[31:0] -> DEST1
+	movc		%a4,%buscr		# unlock the bus
+	bra.b		cas2l_noupdate_done
+	bra.b		~+16
+
+CAS2L_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CAS2L_START
+
+####
+
+#################################################################
+# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON	#
+# CALLING _isp_cas2_finish().					#
+#								#
+# D0 = destination[31:0] operand 1				#
+# D1 = destination[31:0] operand 2				#
+# D2 = cmp[31:0] operand 1					#
+# D3 = cmp[31:0] operand 2					#
+# D4 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required	#
+# D5 = xxxxxxxx							#
+# D6 = xxxxxxxx							#
+# D7 = xxxxxxxx							#
+# A0 = xxxxxxxx							#
+# A1 = xxxxxxxx							#
+# A2 = xxxxxxxx							#
+# A3 = xxxxxxxx							#
+# A4 = xxxxxxxx							#
+# A5 = xxxxxxxx							#
+# A6 = frame pointer						#
+# A7 = stack pointer						#
+#################################################################
+
+cas2l_noupdate_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	sf		%d4			# indicate no update was done
+	bra.l		_isp_cas2_finish
+
+cas2l_update_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	st		%d4			# indicate update was done
+	bra.l		_isp_cas2_finish
+####
+
+	align		0x10
+CAS2L2_START:
+	movc		%a2,%buscr		# assert LOCK*
+	movs.l		(%a1),%d1		# fetch Dest2[31:0]
+	movs.l		(%a0),%d0		# fetch Dest1[31:0]
+	bra.b		CAS2L2_CONT
+CAS2L2_ENTER:
+	bra.b		~+16
+
+CAS2L2_CONT:
+	cmp.l		%d0,%d2			# Dest1 - Compare1
+	bne.b		CAS2L2_NOUPDATE
+	cmp.l		%d1,%d3			# Dest2 - Compare2
+	bne.b		CAS2L2_NOUPDATE
+	movs.l		%d5,(%a1)		# Update2[31:0] -> Dest2
+	bra.b		CAS2L2_UPDATE
+	bra.b		~+16
+
+CAS2L2_UPDATE:
+	swap		%d4			# get Update1[31:16]
+	movs.w		%d4,(%a0)+		# Update1[31:16] -> DEST1
+	movc		%a3,%buscr		# assert LOCKE*
+	swap		%d4			# get Update1[15:0]
+	bra.b		CAS2L2_UPDATE2
+	bra.b		~+16
+
+CAS2L2_UPDATE2:
+	movs.w		%d4,(%a0)		# Update1[15:0] -> DEST1+0x2
+	movc		%a4,%buscr		# unlock the bus
+	bra.w		cas2l_update_done
+	nop
+	bra.b		~+16
+
+CAS2L2_NOUPDATE:
+	swap		%d0			# get Dest1[31:16]
+	movs.w		%d0,(%a0)+		# Dest1[31:16] -> DEST1
+	movc		%a3,%buscr		# assert LOCKE*
+	swap		%d0			# get Dest1[15:0]
+	bra.b		CAS2L2_NOUPDATE2
+	bra.b		~+16
+
+CAS2L2_NOUPDATE2:
+	movs.w		%d0,(%a0)		# Dest1[15:0] -> DEST1+0x2
+	movc		%a4,%buscr		# unlock the bus
+	bra.w		cas2l_noupdate_done
+	nop
+	bra.b		~+16
+
+CAS2L2_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CAS2L2_START
+
+#################################
+
+	align		0x10
+CAS2L3_START:
+	movc		%a2,%buscr		# assert LOCK*
+	movs.l		(%a1),%d1		# fetch Dest2[31:0]
+	movs.l		(%a0),%d0		# fetch Dest1[31:0]
+	bra.b		CAS2L3_CONT
+CAS2L3_ENTER:
+	bra.b		~+16
+
+CAS2L3_CONT:
+	cmp.l		%d0,%d2			# Dest1 - Compare1
+	bne.b		CAS2L3_NOUPDATE
+	cmp.l		%d1,%d3			# Dest2 - Compare2
+	bne.b		CAS2L3_NOUPDATE
+	movs.l		%d5,(%a1)		# Update2[31:0] -> DEST2
+	bra.b		CAS2L3_UPDATE
+	bra.b		~+16
+
+CAS2L3_UPDATE:
+	rol.l		&0x8,%d4		# get Update1[31:24]
+	movs.b		%d4,(%a0)+		# Update1[31:24] -> DEST1
+	swap		%d4			# get Update1[23:8]
+	movs.w		%d4,(%a0)+		# Update1[23:8] -> DEST1+0x1
+	bra.b		CAS2L3_UPDATE2
+	bra.b		~+16
+
+CAS2L3_UPDATE2:
+	rol.l		&0x8,%d4		# get Update1[7:0]
+	movc		%a3,%buscr		# assert LOCKE*
+	movs.b		%d4,(%a0)		# Update1[7:0] -> DEST1+0x3
+	bra.b		CAS2L3_UPDATE3
+	nop
+	bra.b		~+16
+
+CAS2L3_UPDATE3:
+	movc		%a4,%buscr		# unlock the bus
+	bra.w		cas2l_update_done
+	nop
+	nop
+	nop
+	bra.b		~+16
+
+CAS2L3_NOUPDATE:
+	rol.l		&0x8,%d0		# get Dest1[31:24]
+	movs.b		%d0,(%a0)+		# Dest1[31:24] -> DEST1
+	swap		%d0			# get Dest1[23:8]
+	movs.w		%d0,(%a0)+		# Dest1[23:8] -> DEST1+0x1
+	bra.b		CAS2L3_NOUPDATE2
+	bra.b		~+16
+
+CAS2L3_NOUPDATE2:
+	rol.l		&0x8,%d0		# get Dest1[7:0]
+	movc		%a3,%buscr		# assert LOCKE*
+	movs.b		%d0,(%a0)		# Dest1[7:0] -> DEST1+0x3
+	bra.b		CAS2L3_NOUPDATE3
+	nop
+	bra.b		~+16
+
+CAS2L3_NOUPDATE3:
+	movc		%a4,%buscr		# unlock the bus
+	bra.w		cas2l_noupdate_done
+	nop
+	nop
+	nop
+	bra.b		~+14
+
+CAS2L3_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.w		CAS2L3_START
+
+#############################################################
+#############################################################
+
+cas2w:
+	mov.l		%a0,%a2			# copy ADDR1
+	mov.l		%a1,%a3			# copy ADDR2
+	mov.l		%a0,%a4			# copy ADDR1
+	mov.l		%a1,%a5			# copy ADDR2
+
+	addq.l		&0x1,%a4		# ADDR1+1
+	addq.l		&0x1,%a5		# ADDR2+1
+	mov.l		%a2,%d1			# ADDR1
+
+# mask interrupt levels 0-6. save old mask value.
+	mov.w		%sr,%d7			# save current SR
+	ori.w		&0x0700,%sr		# inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+	movc		%sfc,%d6		# save old SFC/DFC
+	movc		%d0,%sfc		# store new SFC
+	movc		%d0,%dfc		# store new DFC
+
+# pre-load the operand ATC. no page faults should occur because
+# _real_lock_page() should have taken care of this.
+	plpaw		(%a2)			# load atc for ADDR1
+	plpaw		(%a4)			# load atc for ADDR1+1
+	plpaw		(%a3)			# load atc for ADDR2
+	plpaw		(%a5)			# load atc for ADDR2+1
+
+# push the operand cache lines from the cache if they exist.
+	cpushl		%dc,(%a2)		# push line for ADDR1
+	cpushl		%dc,(%a4)		# push line for ADDR1+1
+	cpushl		%dc,(%a3)		# push line for ADDR2
+	cpushl		%dc,(%a5)		# push line for ADDR2+1
+
+	mov.l		%d1,%a2			# ADDR1
+	addq.l		&0x3,%d1
+	mov.l		%d1,%a4			# ADDR1+3
+# if ADDR1 was ATC resident before the above "plpaw"s were executed,
+# and it was the next entry scheduled for replacement, and ADDR2
+# shares the same set, then the "plpaw" for ADDR2 can push the ADDR1
+# entries from the ATC. so, we do a second set of "plpa"s.
+	plpar		(%a2)			# load atc for ADDR1
+	plpar		(%a4)			# load atc for ADDR1+3
+
+# load the BUSCR values.
+	mov.l		&0x80000000,%a2		# assert LOCK* buscr value
+	mov.l		&0xa0000000,%a3		# assert LOCKE* buscr value
+	mov.l		&0x00000000,%a4		# buscr unlock value
+
+# there are two possible alignment cases for word cas. they
+# are separated because the final write which asserts LOCKE* must
+# be aligned.
+	mov.l		%a0,%d0			# is ADDR1 misaligned?
+	btst		&0x0,%d0
+	bne.w		CAS2W2_ENTER		# yes
+	bra.b		CAS2W_ENTER		# no
+
+#
+# D0 = dst operand 1 <-
+# D1 = dst operand 2 <-
+# D2 = cmp operand 1
+# D3 = cmp operand 2
+# D4 = update oper 1
+# D5 = update oper 2
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR1
+# A1 = ADDR2
+# A2 = bus LOCK*  value
+# A3 = bus LOCKE* value
+# A4 = bus unlock value
+# A5 = xxxxxxxx
+#
+	align		0x10
+CAS2W_START:
+	movc		%a2,%buscr		# assert LOCK*
+	movs.w		(%a1),%d1		# fetch Dest2[15:0]
+	movs.w		(%a0),%d0		# fetch Dest1[15:0]
+	bra.b		CAS2W_CONT2
+CAS2W_ENTER:
+	bra.b		~+16
+
+CAS2W_CONT2:
+	cmp.w		%d0,%d2			# Dest1 - Compare1
+	bne.b		CAS2W_NOUPDATE
+	cmp.w		%d1,%d3			# Dest2 - Compare2
+	bne.b		CAS2W_NOUPDATE
+	movs.w		%d5,(%a1)		# Update2[15:0] -> DEST2
+	bra.b		CAS2W_UPDATE
+	bra.b		~+16
+
+CAS2W_UPDATE:
+	movc		%a3,%buscr		# assert LOCKE*
+	movs.w		%d4,(%a0)		# Update1[15:0] -> DEST1
+	movc		%a4,%buscr		# unlock the bus
+	bra.b		cas2w_update_done
+	bra.b		~+16
+
+CAS2W_NOUPDATE:
+	movc		%a3,%buscr		# assert LOCKE*
+	movs.w		%d0,(%a0)		# Dest1[15:0] -> DEST1
+	movc		%a4,%buscr		# unlock the bus
+	bra.b		cas2w_noupdate_done
+	bra.b		~+16
+
+CAS2W_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CAS2W_START
+
+####
+
+#################################################################
+# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON	#
+# CALLING _isp_cas2_finish().					#
+#								#
+# D0 = destination[15:0] operand 1				#
+# D1 = destination[15:0] operand 2				#
+# D2 = cmp[15:0] operand 1					#
+# D3 = cmp[15:0] operand 2					#
+# D4 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required	#
+# D5 = xxxxxxxx							#
+# D6 = xxxxxxxx							#
+# D7 = xxxxxxxx							#
+# A0 = xxxxxxxx							#
+# A1 = xxxxxxxx							#
+# A2 = xxxxxxxx							#
+# A3 = xxxxxxxx							#
+# A4 = xxxxxxxx							#
+# A5 = xxxxxxxx							#
+# A6 = frame pointer						#
+# A7 = stack pointer						#
+#################################################################
+
+cas2w_noupdate_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	sf		%d4			# indicate no update was done
+	bra.l		_isp_cas2_finish
+
+cas2w_update_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	st		%d4			# indicate update was done
+	bra.l		_isp_cas2_finish
+####
+
+	align		0x10
+CAS2W2_START:
+	movc		%a2,%buscr		# assert LOCK*
+	movs.w		(%a1),%d1		# fetch Dest2[15:0]
+	movs.w		(%a0),%d0		# fetch Dest1[15:0]
+	bra.b		CAS2W2_CONT2
+CAS2W2_ENTER:
+	bra.b		~+16
+
+CAS2W2_CONT2:
+	cmp.w		%d0,%d2			# Dest1 - Compare1
+	bne.b		CAS2W2_NOUPDATE
+	cmp.w		%d1,%d3			# Dest2 - Compare2
+	bne.b		CAS2W2_NOUPDATE
+	movs.w		%d5,(%a1)		# Update2[15:0] -> DEST2
+	bra.b		CAS2W2_UPDATE
+	bra.b		~+16
+
+CAS2W2_UPDATE:
+	ror.l		&0x8,%d4		# get Update1[15:8]
+	movs.b		%d4,(%a0)+		# Update1[15:8] -> DEST1
+	movc		%a3,%buscr		# assert LOCKE*
+	rol.l		&0x8,%d4		# get Update1[7:0]
+	bra.b		CAS2W2_UPDATE2
+	bra.b		~+16
+
+CAS2W2_UPDATE2:
+	movs.b		%d4,(%a0)		# Update1[7:0] -> DEST1+0x1
+	movc		%a4,%buscr		# unlock the bus
+	bra.w		cas2w_update_done
+	nop
+	bra.b		~+16
+
+CAS2W2_NOUPDATE:
+	ror.l		&0x8,%d0		# get Dest1[15:8]
+	movs.b		%d0,(%a0)+		# Dest1[15:8] -> DEST1
+	movc		%a3,%buscr		# assert LOCKE*
+	rol.l		&0x8,%d0		# get Dest1[7:0]
+	bra.b		CAS2W2_NOUPDATE2
+	bra.b		~+16
+
+CAS2W2_NOUPDATE2:
+	movs.b		%d0,(%a0)		# Dest1[7:0] -> DEST1+0x1
+	movc		%a4,%buscr		# unlock the bus
+	bra.w		cas2w_noupdate_done
+	nop
+	bra.b		~+16
+
+CAS2W2_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CAS2W2_START
+
+#       ######      ##      ######
+#       #	   #  #     #
+#	#	  ######    ######
+#	#	  #    #         #
+#       ######    #    #    ######
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_isp_cas(): "core" emulation code for the cas instruction	#
+#									#
+# XREF ****************************************************************	#
+#	_isp_cas_finish() - only exit point for this emulation code;	#
+#			    do clean-up					#
+#									#
+# INPUT ***************************************************************	#
+#	*see entry chart below*						#
+#									#
+# OUTPUT **************************************************************	#
+#	*see exit chart below*						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	(1) Make several copies of the effective address.		#
+#	(2) Save current SR; Then mask off all maskable interrupts.	#
+#	(3) Save current DFC/SFC (ASSUMED TO BE EQUAL!!!); Then set	#
+#	    SFC/DFC according to whether exception occurred in user or	#
+#	    supervisor mode.						#
+#	(4) Use "plpaw" instruction to pre-load ATC with efective	#
+#	    address page(s). THIS SHOULD NOT FAULT!!! The relevant	#
+#	    page(s) should have been made resident prior to entering	#
+#	    this routine.						#
+#	(5) Push the operand lines from the cache w/ "cpushl".		#
+#	    In the 68040, this was done within the locked region. In	#
+#	    the 68060, it is done outside of the locked region.		#
+#	(6) Pre-fetch the core emulation instructions by executing one	#
+#	    branch within each physical line (16 bytes) of the code	#
+#	    before actually executing the code.				#
+#	(7) Load the BUSCR with the bus lock value.			#
+#	(8) Fetch the source operand.					#
+#	(9) Do the compare. If equal, go to step (12).			#
+#	(10)Unequal. No update occurs. But, we do write the DST op back	#
+#	    to itself (as w/ the '040) so we can gracefully unlock	#
+#	    the bus (and assert LOCKE*) using BUSCR and the final move.	#
+#	(11)Exit.							#
+#	(12)Write update operand to the DST location. Use BUSCR to	#
+#	    assert LOCKE* for the final write operation.		#
+#	(13)Exit.							#
+#									#
+#	The algorithm is actually implemented slightly differently	#
+# depending on the size of the operation and the misalignment of the	#
+# operand. A misaligned operand must be written in aligned chunks or	#
+# else the BUSCR register control gets confused.			#
+#									#
+#########################################################################
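+
+# for reference, a commented sketch (not from the original sources) of
+# the user-visible instruction whose semantics are emulated below:
+#
+#	cas.w	%d1,%d2,(%a0)		# Dc,Du,<ea>
+#
+# if ((%a0) == %d1), then (%a0) = %d2; otherwise %d1 = (%a0). the
+# condition codes reflect the compare, and the whole read-modify-write
+# must appear atomic on the bus.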
+
+#########################################################
+# THIS IS THE STATE OF THE INTEGER REGISTER FILE UPON	#
+# ENTERING _isp_cas().					#
+#							#
+# D0 = xxxxxxxx						#
+# D1 = xxxxxxxx						#
+# D2 = update operand					#
+# D3 = xxxxxxxx						#
+# D4 = compare operand					#
+# D5 = xxxxxxxx						#
+# D6 = supervisor ('xxxxxxff) or user mode ('xxxxxx00)	#
+# D7 = longword ('xxxxxxff) or word size ('xxxxxx00)	#
+# A0 = ADDR						#
+# A1 = xxxxxxxx						#
+# A2 = xxxxxxxx						#
+# A3 = xxxxxxxx						#
+# A4 = xxxxxxxx						#
+# A5 = xxxxxxxx						#
+# A6 = frame pointer					#
+# A7 = stack pointer					#
+#########################################################
+
+	global		_isp_cas
+_isp_cas:
+	tst.b		%d6			# user or supervisor mode?
+	bne.b		cas_super		# supervisor
+cas_user:
+	movq.l		&0x1,%d0		# load user data fc
+	bra.b		cas_cont
+cas_super:
+	movq.l		&0x5,%d0		# load supervisor data fc
+
+cas_cont:
+	tst.b		%d7			# word or longword?
+	bne.w		casl			# longword
+
+####
+casw:
+	mov.l		%a0,%a1			# make copy for plpaw1
+	mov.l		%a0,%a2			# make copy for plpaw2
+	addq.l		&0x1,%a2		# plpaw2 points to end of word
+
+	mov.l		%d2,%d3			# d3 = update[7:0]
+	lsr.w		&0x8,%d2		# d2 = update[15:8]
+
+# mask interrupt levels 0-6. save old mask value.
+	mov.w		%sr,%d7			# save current SR
+	ori.w		&0x0700,%sr		# inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+	movc		%sfc,%d6		# save old SFC/DFC
+	movc		%d0,%sfc		# load new sfc
+	movc		%d0,%dfc		# load new dfc
+
+# pre-load the operand ATC. no page faults should occur here because
+# _real_lock_page() should have taken care of this.
+	plpaw		(%a1)			# load atc for ADDR
+	plpaw		(%a2)			# load atc for ADDR+1
+
+# push the operand lines from the cache if they exist.
+	cpushl		%dc,(%a1)		# push dirty data
+	cpushl		%dc,(%a2)		# push dirty data
+
+# load the BUSCR values.
+	mov.l		&0x80000000,%a1		# assert LOCK* buscr value
+	mov.l		&0xa0000000,%a2		# assert LOCKE* buscr value
+	mov.l		&0x00000000,%a3		# buscr unlock value
+
+# pre-load the instruction cache for the following algorithm.
+# this will minimize the number of cycles that LOCK* will be asserted.
+	bra.b		CASW_ENTER		# start pre-loading icache
+
+#
+# D0 = dst operand <-
+# D1 = xxxxxxxx
+# D2 = update[15:8] operand
+# D3 = update[7:0]  operand
+# D4 = compare[15:0] operand
+# D5 = xxxxxxxx
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR
+# A1 = bus LOCK*  value
+# A2 = bus LOCKE* value
+# A3 = bus unlock value
+# A4 = xxxxxxxx
+# A5 = xxxxxxxx
+#
+	align		0x10
+CASW_START:
+	movc		%a1,%buscr		# assert LOCK*
+	movs.w		(%a0),%d0		# fetch Dest[15:0]
+	cmp.w		%d0,%d4			# Dest - Compare
+	bne.b		CASW_NOUPDATE
+	bra.b		CASW_UPDATE
+CASW_ENTER:
+	bra.b		~+16
+
+CASW_UPDATE:
+	movs.b		%d2,(%a0)+		# Update[15:8] -> DEST
+	movc		%a2,%buscr		# assert LOCKE*
+	movs.b		%d3,(%a0)		# Update[7:0] -> DEST+0x1
+	bra.b		CASW_UPDATE2
+	bra.b		~+16
+
+CASW_UPDATE2:
+	movc		%a3,%buscr		# unlock the bus
+	bra.b		casw_update_done
+	nop
+	nop
+	nop
+	nop
+	bra.b		~+16
+
+CASW_NOUPDATE:
+	ror.l		&0x8,%d0		# get Dest[15:8]
+	movs.b		%d0,(%a0)+		# Dest[15:8] -> DEST
+	movc		%a2,%buscr		# assert LOCKE*
+	rol.l		&0x8,%d0		# get Dest[7:0]
+	bra.b		CASW_NOUPDATE2
+	bra.b		~+16
+
+CASW_NOUPDATE2:
+	movs.b		%d0,(%a0)		# Dest[7:0] -> DEST+0x1
+	movc		%a3,%buscr		# unlock the bus
+	bra.b		casw_noupdate_done
+	nop
+	nop
+	bra.b		~+16
+
+CASW_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CASW_START
+
+#################################################################
+# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON	#
+# CALLING _isp_cas_finish().					#
+#								#
+# D0 = destination[15:0] operand				#
+# D1 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required	#
+# D2 = xxxxxxxx							#
+# D3 = xxxxxxxx							#
+# D4 = compare[15:0] operand					#
+# D5 = xxxxxxxx							#
+# D6 = xxxxxxxx							#
+# D7 = xxxxxxxx							#
+# A0 = xxxxxxxx							#
+# A1 = xxxxxxxx							#
+# A2 = xxxxxxxx							#
+# A3 = xxxxxxxx							#
+# A4 = xxxxxxxx							#
+# A5 = xxxxxxxx							#
+# A6 = frame pointer						#
+# A7 = stack pointer						#
+#################################################################
+
+casw_noupdate_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	sf		%d1			# indicate no update was done
+	bra.l		_isp_cas_finish
+
+casw_update_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	st		%d1			# indicate update was done
+	bra.l		_isp_cas_finish
+
+################
+
+# there are two possible mis-aligned cases for longword cas. they
+# are separated because the final write which asserts LOCKE* must
+# be an aligned write.
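+# for example, in the word-misaligned case handled below, the longword
+# update is split as Update[31:16] -> ADDR, then Update[15:0] ->
+# ADDR+0x2, so that the final LOCKE*-asserted write is an aligned
+# word write.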
+casl:
+	mov.l		%a0,%a1			# make copy for plpaw1
+	mov.l		%a0,%a2			# make copy for plpaw2
+	addq.l		&0x3,%a2		# plpaw2 points to end of longword
+
+	mov.l		%a0,%d1			# byte or word misaligned?
+	btst		&0x0,%d1
+	bne.w		casl2			# byte misaligned
+
+	mov.l		%d2,%d3			# d3 = update[15:0]
+	swap		%d2			# d2 = update[31:16]
+
+# mask interrupt levels 0-6. save old mask value.
+	mov.w		%sr,%d7			# save current SR
+	ori.w		&0x0700,%sr		# inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+	movc		%sfc,%d6		# save old SFC/DFC
+	movc		%d0,%sfc		# load new sfc
+	movc		%d0,%dfc		# load new dfc
+
+# pre-load the operand ATC. no page faults should occur here because
+# _real_lock_page() should have taken care of this.
+	plpaw		(%a1)			# load atc for ADDR
+	plpaw		(%a2)			# load atc for ADDR+3
+
+# push the operand lines from the cache if they exist.
+	cpushl		%dc,(%a1)		# push dirty data
+	cpushl		%dc,(%a2)		# push dirty data
+
+# load the BUSCR values.
+	mov.l		&0x80000000,%a1		# assert LOCK* buscr value
+	mov.l		&0xa0000000,%a2		# assert LOCKE* buscr value
+	mov.l		&0x00000000,%a3		# buscr unlock value
+
+	bra.b		CASL_ENTER		# start pre-loading icache
+
+#
+# D0 = dst operand <-
+# D1 = xxxxxxxx
+# D2 = update[31:16] operand
+# D3 = update[15:0]  operand
+# D4 = compare[31:0] operand
+# D5 = xxxxxxxx
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR
+# A1 = bus LOCK*  value
+# A2 = bus LOCKE* value
+# A3 = bus unlock value
+# A4 = xxxxxxxx
+# A5 = xxxxxxxx
+#
+	align		0x10
+CASL_START:
+	movc		%a1,%buscr		# assert LOCK*
+	movs.l		(%a0),%d0		# fetch Dest[31:0]
+	cmp.l		%d0,%d4			# Dest - Compare
+	bne.b		CASL_NOUPDATE
+	bra.b		CASL_UPDATE
+CASL_ENTER:
+	bra.b		~+16
+
+CASL_UPDATE:
+	movs.w		%d2,(%a0)+		# Update[31:16] -> DEST
+	movc		%a2,%buscr		# assert LOCKE*
+	movs.w		%d3,(%a0)		# Update[15:0] -> DEST+0x2
+	bra.b		CASL_UPDATE2
+	bra.b		~+16
+
+CASL_UPDATE2:
+	movc		%a3,%buscr		# unlock the bus
+	bra.b		casl_update_done
+	nop
+	nop
+	nop
+	nop
+	bra.b		~+16
+
+CASL_NOUPDATE:
+	swap		%d0			# get Dest[31:16]
+	movs.w		%d0,(%a0)+		# Dest[31:16] -> DEST
+	swap		%d0			# get Dest[15:0]
+	movc		%a2,%buscr		# assert LOCKE*
+	bra.b		CASL_NOUPDATE2
+	bra.b		~+16
+
+CASL_NOUPDATE2:
+	movs.w		%d0,(%a0)		# Dest[15:0] -> DEST+0x2
+	movc		%a3,%buscr		# unlock the bus
+	bra.b		casl_noupdate_done
+	nop
+	nop
+	bra.b		~+16
+
+CASL_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CASL_START
+
+#################################################################
+# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON	#
+# CALLING _isp_cas_finish().					#
+#								#
+# D0 = destination[31:0] operand				#
+# D1 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required	#
+# D2 = xxxxxxxx							#
+# D3 = xxxxxxxx							#
+# D4 = compare[31:0] operand					#
+# D5 = xxxxxxxx							#
+# D6 = xxxxxxxx							#
+# D7 = xxxxxxxx							#
+# A0 = xxxxxxxx							#
+# A1 = xxxxxxxx							#
+# A2 = xxxxxxxx							#
+# A3 = xxxxxxxx							#
+# A4 = xxxxxxxx							#
+# A5 = xxxxxxxx							#
+# A6 = frame pointer						#
+# A7 = stack pointer						#
+#################################################################
+
+casl_noupdate_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	sf		%d1			# indicate no update was done
+	bra.l		_isp_cas_finish
+
+casl_update_done:
+
+# restore previous SFC/DFC value.
+	movc		%d6,%sfc		# restore old SFC
+	movc		%d6,%dfc		# restore old DFC
+
+# restore previous interrupt mask level.
+	mov.w		%d7,%sr			# restore old SR
+
+	st		%d1			# indicate update was done
+	bra.l		_isp_cas_finish
+
+#######################################
+casl2:
+	mov.l		%d2,%d5			# d5 = Update[7:0]
+	lsr.l		&0x8,%d2
+	mov.l		%d2,%d3			# d3 = Update[23:8]
+	swap		%d2			# d2 = Update[31:24]
+
+# mask interrupt levels 0-6. save old mask value.
+	mov.w		%sr,%d7			# save current SR
+	ori.w		&0x0700,%sr		# inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+	movc		%sfc,%d6		# save old SFC/DFC
+	movc		%d0,%sfc		# load new sfc
+	movc		%d0,%dfc		# load new dfc
+
+# pre-load the operand ATC. no page faults should occur here because
+# _real_lock_page() should have taken care of this already.
+	plpaw		(%a1)			# load atc for ADDR
+	plpaw		(%a2)			# load atc for ADDR+3
+
+# push the operand lines from the cache if they exist.
+	cpushl		%dc,(%a1)		# push dirty data
+	cpushl		%dc,(%a2)		# push dirty data
+
+# load the BUSCR values.
+	mov.l		&0x80000000,%a1		# assert LOCK* buscr value
+	mov.l		&0xa0000000,%a2		# assert LOCKE* buscr value
+	mov.l		&0x00000000,%a3		# buscr unlock value
+
+# pre-load the instruction cache for the following algorithm.
+# this will minimize the number of cycles that LOCK* will be asserted.
+	bra.b		CASL2_ENTER		# start pre-loading icache
+
+#
+# D0 = dst operand <-
+# D1 = xxxxxxxx
+# D2 = update[31:24] operand
+# D3 = update[23:8]  operand
+# D4 = compare[31:0] operand
+# D5 = update[7:0]  operand
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR
+# A1 = bus LOCK*  value
+# A2 = bus LOCKE* value
+# A3 = bus unlock value
+# A4 = xxxxxxxx
+# A5 = xxxxxxxx
+#
+	align		0x10
+CASL2_START:
+	movc		%a1,%buscr		# assert LOCK*
+	movs.l		(%a0),%d0		# fetch Dest[31:0]
+	cmp.l		%d0,%d4			# Dest - Compare
+	bne.b		CASL2_NOUPDATE
+	bra.b		CASL2_UPDATE
+CASL2_ENTER:
+	bra.b		~+16
+
+CASL2_UPDATE:
+	movs.b		%d2,(%a0)+		# Update[31:24] -> DEST
+	movs.w		%d3,(%a0)+		# Update[23:8] -> DEST+0x1
+	movc		%a2,%buscr		# assert LOCKE*
+	bra.b		CASL2_UPDATE2
+	bra.b		~+16
+
+CASL2_UPDATE2:
+	movs.b		%d5,(%a0)		# Update[7:0] -> DEST+0x3
+	movc		%a3,%buscr		# unlock the bus
+	bra.w		casl_update_done
+	nop
+	bra.b		~+16
+
+CASL2_NOUPDATE:
+	rol.l		&0x8,%d0		# get Dest[31:24]
+	movs.b		%d0,(%a0)+		# Dest[31:24] -> DEST
+	swap		%d0			# get Dest[23:8]
+	movs.w		%d0,(%a0)+		# Dest[23:8] -> DEST+0x1
+	bra.b		CASL2_NOUPDATE2
+	bra.b		~+16
+
+CASL2_NOUPDATE2:
+	rol.l		&0x8,%d0		# get Dest[7:0]
+	movc		%a2,%buscr		# assert LOCKE*
+	movs.b		%d0,(%a0)		# Dest[7:0] -> DEST+0x3
+	bra.b		CASL2_NOUPDATE3
+	nop
+	bra.b		~+16
+
+CASL2_NOUPDATE3:
+	movc		%a3,%buscr		# unlock the bus
+	bra.w		casl_noupdate_done
+	nop
+	nop
+	nop
+	bra.b		~+16
+
+CASL2_FILLER:
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	bra.b		CASL2_START
+
+####
+####
+# end label used by _isp_cas_inrange()
+	global		_CASHI
+_CASHI:
diff --git a/arch/m68k/ifpsp060/src/itest.S b/arch/m68k/ifpsp060/src/itest.S
new file mode 100644
index 0000000..ba4a30c
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/itest.S
@@ -0,0 +1,6386 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#############################################
+set	SREGS,		-64
+set	IREGS,		-128
+set	SCCR,		-130
+set	ICCR,		-132
+set	TESTCTR,	-136
+set	EAMEM,		-140
+set	EASTORE,	-144
+set	DATA,		-160
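+
+# test harness conventions (inferred from the tests below): each test
+# loads the expected ccodes into ICCR and an expected register image
+# into IREGS, executes one instruction, then captures the actual
+# ccodes/registers into SCCR/SREGS; chkregs compares the two images
+# and returns a nonzero d0 on a mismatch, which is reported as a
+# failure through error/chk_test.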
+
+#############################################
+TESTTOP:
+	bra.l		_060TESTS_
+
+start_str:
+	string		"Testing 68060 ISP started:\n"
+
+pass_str:
+	string		"passed\n"
+fail_str:
+	string		" failed\n"
+
+	align		0x4
+chk_test:
+	tst.l		%d0
+	bne.b		test_fail
+test_pass:
+	pea		pass_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+	rts
+test_fail:
+	mov.l		%d1,-(%sp)
+	bsr.l		_print_num
+	addq.l		&0x4,%sp
+
+	pea		fail_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+	rts
+
+#############################################
+_060TESTS_:
+	link		%a6,&-160
+
+	movm.l		&0x3f3c,-(%sp)
+
+	pea		start_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+### mul
+	clr.l		TESTCTR(%a6)
+	pea		mulul_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		mulul_0
+
+	bsr.l		chk_test
+
+### div
+	clr.l		TESTCTR(%a6)
+	pea		divul_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		divul_0
+
+	bsr.l		chk_test
+
+### cmp2
+	clr.l		TESTCTR(%a6)
+	pea		cmp2_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		cmp2_1
+
+	bsr.l		chk_test
+
+### movp
+	clr.l		TESTCTR(%a6)
+	pea		movp_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		movp_0
+
+	bsr.l		chk_test
+
+### ea
+	clr.l		TESTCTR(%a6)
+	pea		ea_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	mov.l		&0x2,EAMEM(%a6)
+	bsr.l		ea_0
+
+	bsr.l		chk_test
+
+### cas
+	clr.l		TESTCTR(%a6)
+	pea		cas_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		cas0
+
+	bsr.l		chk_test
+
+### cas2
+	clr.l		TESTCTR(%a6)
+	pea		cas2_str(%pc)
+	bsr.l		_print_str
+	addq.l		&0x4,%sp
+
+	bsr.l		cas20
+
+	bsr.l		chk_test
+
+###
+	movm.l		(%sp)+,&0x3cfc
+
+	unlk		%a6
+	rts
+
+#############################################
+#############################################
+
+mulul_str:
+	string		"\t64-bit multiply..."
+
+	align		0x4
+mulul_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d1
+	mov.l		&0x99999999,%d2
+	mov.l		&0x88888888,%d3
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	clr.l		IREGS+0x8(%a6)
+	clr.l		IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x77777777,%d1
+	mov.l		&0x99999999,%d2
+	mov.l		&0x00000000,%d3
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	clr.l		IREGS+0x8(%a6)
+	clr.l		IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x00000010,%d1
+	mov.l		&0x66666666,%d2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		%d1,%d2:%d2
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000006,IREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x55555555,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x00000003,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000000,IREGS+0x8(%a6)
+	mov.l		&0xffffffff,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_4:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x40000000,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x00000004,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000001,IREGS+0x8(%a6)
+	mov.l		&0x00000000,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_5:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xffffffff,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0xffffffff,%d3
+
+	mov.w		&0x0008,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0xfffffffe,IREGS+0x8(%a6)
+	mov.l		&0x00000001,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_6:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x80000000,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0xffffffff,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	muls.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000000,IREGS+0x8(%a6)
+	mov.l		&0x80000000,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_7:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x80000000,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x00000001,%d3
+
+	mov.w		&0x0008,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	muls.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0xffffffff,IREGS+0x8(%a6)
+	mov.l		&0x80000000,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+mulul_8:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x00000001,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x80000000,%d3
+
+	mov.w		&0x0008,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	muls.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0xffffffff,IREGS+0x8(%a6)
+	mov.l		&0x80000000,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	mov.l		TESTCTR(%a6),%d1
+	clr.l		%d0
+	rts
+
+#############################################
+
+movp_str:
+	string	"\tmovep..."
+
+	align	0x4
+###############################
+# movep.w	%d0,(0x0,%a0) #
+###############################
+movp_0:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.w	&0xaaaa,%d0
+	clr.b	0x0(%a0)
+	clr.b	0x2(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d0,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x2(%a0),%d1
+	lsl.w	&0x8,%d1
+	mov.b	0x0(%a0),%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	%d0,(0x0,%a0) #
+###############################
+movp_1:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA+0x4(%a6),%a0
+	mov.w	&0xaaaa,%d0
+	clr.l	-0x4(%a0)
+	clr.l	(%a0)
+	clr.l	0x4(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d0,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	tst.l	-0x4(%a0)
+	bne.l	error
+	tst.l	0x4(%a0)
+	bne.l	error
+	cmpi.l	(%a0),&0xaa00aa00
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+#####################################################
+# movep.w	%d0,(0x0,%a0)			    #
+#	- this test has %cc initially equal to zero #
+#####################################################
+movp_2:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.w	&0xaaaa,%d0
+	clr.b	0x0(%a0)
+	clr.b	0x2(%a0)
+
+	mov.w	&0x0000,ICCR(%a6)
+	mov.w	&0x0000,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d0,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x2(%a0),%d1
+	lsl.w	&0x8,%d1
+	mov.b	0x0(%a0),%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	(0x0,%a0),%d0 #
+###############################
+movp_3:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.b	&0xaa,0x0(%a0)
+	mov.b	&0xaa,0x2(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	(0x0,%a0),%d0
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.w	&0xaaaa,IREGS+0x2(%a6)
+
+	mov.w	&0xaaaa,%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.l	%d0,(0x0,%a0) #
+###############################
+movp_4:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.l	&0xaaaaaaaa,%d0
+	clr.b	0x0(%a0)
+	clr.b	0x2(%a0)
+	clr.b	0x4(%a0)
+	clr.b	0x6(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	%d0,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x6(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x4(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x2(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x0(%a0),%d1
+
+	cmp.l	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.l	%d0,(0x0,%a0) #
+###############################
+movp_5:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA+0x4(%a6),%a0
+	mov.l	&0xaaaaaaaa,%d0
+	clr.l	-0x4(%a0)
+	clr.l	(%a0)
+	clr.l	0x4(%a0)
+	clr.l	0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	%d0,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	tst.l	-0x4(%a0)
+	bne.l	error
+	tst.l	0x8(%a0)
+	bne.l	error
+	cmpi.l	(%a0),&0xaa00aa00
+	bne.l	error
+	cmpi.l	0x4(%a0),&0xaa00aa00
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.l	(0x0,%a0),%d0 #
+###############################
+movp_6:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.b	&0xaa,0x0(%a0)
+	mov.b	&0xaa,0x2(%a0)
+	mov.b	&0xaa,0x4(%a0)
+	mov.b	&0xaa,0x6(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	(0x0,%a0),%d0
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.l	&0xaaaaaaaa,IREGS(%a6)
+
+	mov.l	&0xaaaaaaaa,%d1
+
+	cmp.l	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	%d7,(0x0,%a0) #
+###############################
+movp_7:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.w	&0xaaaa,%d7
+	clr.b	0x0(%a0)
+	clr.b	0x2(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d7,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x2(%a0),%d1
+	lsl.w	&0x8,%d1
+	mov.b	0x0(%a0),%d1
+
+	cmp.w	%d7,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	(0x0,%a0),%d7 #
+###############################
+movp_8:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.b	&0xaa,0x0(%a0)
+	mov.b	&0xaa,0x2(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	(0x0,%a0),%d7
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.w	&0xaaaa,IREGS+30(%a6)
+
+	mov.w	&0xaaaa,%d1
+
+	cmp.w	%d7,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	%d0,(0x0,%a0) #
+###############################
+movp_9:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.w	&0xaaaa,%d0
+	clr.b	0x0(%a0)
+	clr.b	0x2(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x001f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d0,(0x0,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x2(%a0),%d1
+	lsl.w	&0x8,%d1
+	mov.b	0x0(%a0),%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	%d0,(0x8,%a0) #
+###############################
+movp_10:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.w	&0xaaaa,%d0
+	clr.b	0x0+0x8(%a0)
+	clr.b	0x2+0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d0,(0x8,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x2+0x8(%a0),%d1
+	lsl.w	&0x8,%d1
+	mov.b	0x0+0x8(%a0),%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.w	(0x8,%a0),%d0 #
+###############################
+movp_11:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.b	&0xaa,0x0+0x8(%a0)
+	mov.b	&0xaa,0x2+0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	(0x8,%a0),%d0
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.w	&0xaaaa,IREGS+0x2(%a6)
+
+	mov.w	&0xaaaa,%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.l	%d0,(0x8,%a0) #
+###############################
+movp_12:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.l	&0xaaaaaaaa,%d0
+	clr.b	0x0+0x8(%a0)
+	clr.b	0x2+0x8(%a0)
+	clr.b	0x4+0x8(%a0)
+	clr.b	0x6+0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	%d0,(0x8,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x6+0x8(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x4+0x8(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x2+0x8(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x0+0x8(%a0),%d1
+
+	cmp.l	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+###############################
+# movep.l	(0x8,%a0),%d0 #
+###############################
+movp_13:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA(%a6),%a0
+	mov.b	&0xaa,0x0+0x8(%a0)
+	mov.b	&0xaa,0x2+0x8(%a0)
+	mov.b	&0xaa,0x4+0x8(%a0)
+	mov.b	&0xaa,0x6+0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	(0x8,%a0),%d0
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.l	&0xaaaaaaaa,IREGS(%a6)
+
+	mov.l	&0xaaaaaaaa,%d1
+
+	cmp.l	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+################################
+# movep.w	%d0,(-0x8,%a0) #
+################################
+movp_14:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA+0x8(%a6),%a0
+	mov.w	&0xaaaa,%d0
+	clr.b	0x0-0x8(%a0)
+	clr.b	0x2-0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	%d0,(-0x8,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x2-0x8(%a0),%d1
+	lsl.w	&0x8,%d1
+	mov.b	0x0-0x8(%a0),%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+################################
+# movep.w	(-0x8,%a0),%d0 #
+################################
+movp_15:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA+0x8(%a6),%a0
+	mov.b	&0xaa,0x0-0x8(%a0)
+	mov.b	&0xaa,0x2-0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.w	(-0x8,%a0),%d0
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.w	&0xaaaa,IREGS+0x2(%a6)
+
+	mov.w	&0xaaaa,%d1
+
+	cmp.w	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+################################
+# movep.l	%d0,(-0x8,%a0) #
+################################
+movp_16:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA+0x8(%a6),%a0
+	mov.l	&0xaaaaaaaa,%d0
+	clr.b	0x0-0x8(%a0)
+	clr.b	0x2-0x8(%a0)
+	clr.b	0x4-0x8(%a0)
+	clr.b	0x6-0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	%d0,(-0x8,%a0)
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+
+	mov.b	0x6-0x8(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x4-0x8(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x2-0x8(%a0),%d1
+	lsl.l	&0x8,%d1
+	mov.b	0x0-0x8(%a0),%d1
+
+	cmp.l	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+################################
+# movep.l	(-0x8,%a0),%d0 #
+################################
+movp_17:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+	lea	DATA+0x8(%a6),%a0
+	mov.b	&0xaa,0x0-0x8(%a0)
+	mov.b	&0xaa,0x2-0x8(%a0)
+	mov.b	&0xaa,0x4-0x8(%a0)
+	mov.b	&0xaa,0x6-0x8(%a0)
+
+	mov.w	&0x001f,ICCR(%a6)
+	mov.w	&0x1f,%cc
+	movm.l	&0x7fff,IREGS(%a6)
+
+	movp.l	(-0x8,%a0),%d0
+
+	mov.w	%cc,SCCR(%a6)
+	movm.l	&0x7fff,SREGS(%a6)
+	mov.l	&0xaaaaaaaa,IREGS(%a6)
+
+	mov.l	&0xaaaaaaaa,%d1
+
+	cmp.l	%d0,%d1
+	bne.l	error
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+	mov.l	TESTCTR(%a6),%d1
+	clr.l	%d0
+	rts
+
+###########################################################
+
+divul_str:
+	string		"\t64-bit divide..."
+
+	align		0x4
+divul_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
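+# the body of this first test is commented out below; it would divide
+# by zero (%d1 is cleared first), which presumably cannot be verified
+# here without also exercising the divide-by-zero exception path.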
+#	movm.l		DEF_REGS(%pc),&0x3fff
+
+#	clr.l		%d1
+#	mov.l		&0x99999999,%d2
+#	mov.l		&0x88888888,%d3
+
+#	mov.w		&0x001e,ICCR(%a6)
+#	mov.w		&0x001f,%cc
+#	movm.l		&0x7fff,IREGS(%a6)
+
+#	divu.l		%d1,%d2:%d3
+
+#	mov.w		%cc,SCCR(%a6)
+#	movm.l		&0x7fff,SREGS(%a6)
+
+#	bsr.l		chkregs
+#	tst.b		%d0
+#	bne.l		error
+
+divul_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x00000001,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x00000000,%d3
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x001f,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x44444444,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x55555555,%d3
+
+	mov.w		&0x0010,ICCR(%a6)
+	mov.w		&0x001f,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x11111111,IREGS+0x8(%a6)
+	mov.l		&0x00000001,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x55555555,%d1
+	mov.l		&0x00000000,%d2
+	mov.l		&0x44444444,%d3
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x001f,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x44444444,IREGS+0x8(%a6)
+	mov.l		&0x00000000,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_4:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x11111111,%d1
+	mov.l		&0x44444444,%d2
+	mov.l		&0x44444444,%d3
+
+	mov.w		&0x001e,ICCR(%a6)
+	mov.w		&0x001d,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_5:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xfffffffe,%d1
+	mov.l		&0x00000001,%d2
+	mov.l		&0x00000002,%d3
+
+	mov.w		&0x001e,ICCR(%a6)
+	mov.w		&0x001d,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divs.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_6:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xfffffffe,%d1
+	mov.l		&0x00000001,%d2
+	mov.l		&0x00000000,%d3
+
+	mov.w		&0x0018,ICCR(%a6)
+	mov.w		&0x001d,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divs.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000000,IREGS+0x8(%a6)
+	mov.l		&0x80000000,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_7:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x00000002,%d1
+	mov.l		&0x00000001,%d2
+	mov.l		&0x00000000,%d3
+
+	mov.w		&0x001e,ICCR(%a6)
+	mov.w		&0x001d,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divs.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_8:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xffffffff,%d1
+	mov.l		&0xfffffffe,%d2
+	mov.l		&0xffffffff,%d3
+
+	mov.w		&0x0008,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_9:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xffffffff,%d1
+	mov.l		&0xfffffffe,%d2
+	mov.l		&0xffffffff,%d3
+
+	mov.w		&0x0008,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		&0xffffffff,%d2:%d2
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0xffffffff,IREGS+0x8(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+divul_10:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x0000ffff,%d1
+	mov.l		&0x00000001,%d2
+	mov.l		&0x55555555,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	divu.l		%d1,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x0000aaab,IREGS+0x8(%a6)
+	mov.l		&0x00015556,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	mov.l		TESTCTR(%a6),%d1
+	clr.l		%d0
+	rts
+
+###########################################################
+
+cas_str:
+	string		"\tcas..."
+
+	align		0x4
+cas0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+
+	mov.w		&0xaaaa,(%a0)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas.w		%d1,%d2,(%a0)			# Dc,Du,<ea>
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d3
+	mov.w		&0xbbbb,IREGS+0xc+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+
+	mov.w		&0xeeee,(%a0)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas.w		%d1,%d2,(%a0)			# Dc,Du,<ea>
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d3
+	mov.w		&0xeeee,IREGS+0x4+0x2(%a6)
+	mov.w		&0xeeee,IREGS+0xc+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x2(%a6),%a0
+
+	mov.l		&0xaaaaaaaa,(%a0)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas.l		%d1,%d2,(%a0)			# Dc,Du,<ea>
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d3
+	mov.l		&0xbbbbbbbb,IREGS+0xc(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x2(%a6),%a0
+
+	mov.l		&0xeeeeeeee,(%a0)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas.l		%d1,%d2,(%a0)			# Dc,Du,<ea>
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d3
+	mov.l		&0xeeeeeeee,IREGS+0x4(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0xc(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas4:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+
+	mov.l		&0xaaaaaaaa,(%a0)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas.l		%d1,%d2,(%a0)			# Dc,Du,<ea>
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d3
+	mov.l		&0xbbbbbbbb,IREGS+0xc(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas5:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+
+	mov.l		&0x7fffffff,(%a0)
+
+	mov.l		&0x80000000,%d1
+	mov.l		&0xbbbbbbbb,%d2
+
+	mov.w		&0x001b,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas.l		%d1,%d2,(%a0)			# Dc,Du,<ea>
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d3
+	mov.l		&0x7fffffff,IREGS+0x4(%a6)
+	mov.l		&0x7fffffff,IREGS+0xc(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	mov.l		TESTCTR(%a6),%d1
+	clr.l		%d0
+	rts
+
+###########################################################
+
+cas2_str:
+	string		"\tcas2..."
+
+	align		0x4
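+# The cas2 tests repeat the cas cases with two compare/update pairs and
+# two memory operands at varying alignments; cas20-cas28 use longword
+# operands and the tests from cas29 on use the word-sized form.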
+cas20:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x0(%a6),%a0
+	lea		DATA+0x4(%a6),%a1
+
+	mov.l		&0xaaaaaaaa,(%a0)
+	mov.l		&0xbbbbbbbb,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xcccccccc,IREGS+0x14(%a6)
+	mov.l		&0xdddddddd,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas21:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+	lea		DATA+0x5(%a6),%a1
+
+	mov.l		&0xaaaaaaaa,(%a0)
+	mov.l		&0xbbbbbbbb,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xcccccccc,IREGS+0x14(%a6)
+	mov.l		&0xdddddddd,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas22:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x2(%a6),%a0
+	lea		DATA+0x6(%a6),%a1
+
+	mov.l		&0xaaaaaaaa,(%a0)
+	mov.l		&0xbbbbbbbb,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xcccccccc,IREGS+0x14(%a6)
+	mov.l		&0xdddddddd,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas23:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x0(%a6),%a0
+	lea		DATA+0x4(%a6),%a1
+
+	mov.l		&0xeeeeeeee,(%a0)
+	mov.l		&0xbbbbbbbb,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xeeeeeeee,IREGS+0x4(%a6)
+	mov.l		&0xbbbbbbbb,IREGS+0x8(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x14(%a6)
+	mov.l		&0xbbbbbbbb,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas24:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+	lea		DATA+0x5(%a6),%a1
+
+	mov.l		&0xeeeeeeee,(%a0)
+	mov.l		&0xbbbbbbbb,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xeeeeeeee,IREGS+0x4(%a6)
+	mov.l		&0xbbbbbbbb,IREGS+0x8(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x14(%a6)
+	mov.l		&0xbbbbbbbb,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas25:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x2(%a6),%a0
+	lea		DATA+0x6(%a6),%a1
+
+	mov.l		&0xeeeeeeee,(%a0)
+	mov.l		&0xbbbbbbbb,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xeeeeeeee,IREGS+0x4(%a6)
+	mov.l		&0xbbbbbbbb,IREGS+0x8(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x14(%a6)
+	mov.l		&0xbbbbbbbb,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas26:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x0(%a6),%a0
+	lea		DATA+0x4(%a6),%a1
+
+	mov.l		&0xaaaaaaaa,(%a0)
+	mov.l		&0xeeeeeeee,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xaaaaaaaa,IREGS+0x4(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x8(%a6)
+	mov.l		&0xaaaaaaaa,IREGS+0x14(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas27:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+	lea		DATA+0x5(%a6),%a1
+
+	mov.l		&0xaaaaaaaa,(%a0)
+	mov.l		&0xeeeeeeee,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0xbbbbbbbb,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xaaaaaaaa,IREGS+0x4(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x8(%a6)
+	mov.l		&0xaaaaaaaa,IREGS+0x14(%a6)
+	mov.l		&0xeeeeeeee,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas28:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x2(%a6),%a0
+	lea		DATA+0x6(%a6),%a1
+
+	mov.l		&0xaaaaaaaa,(%a0)
+	mov.l		&0x7fffffff,(%a1)
+
+	mov.l		&0xaaaaaaaa,%d1
+	mov.l		&0x80000000,%d2
+	mov.l		&0xcccccccc,%d3
+	mov.l		&0xdddddddd,%d4
+
+	mov.w		&0x000b,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.l		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.l		(%a0),%d5
+	mov.l		(%a1),%d6
+	mov.l		&0xaaaaaaaa,IREGS+0x4(%a6)
+	mov.l		&0x7fffffff,IREGS+0x8(%a6)
+	mov.l		&0xaaaaaaaa,IREGS+0x14(%a6)
+	mov.l		&0x7fffffff,IREGS+0x18(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+##################################
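+# word-sized cas2 variants of the longword cases above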
+cas29:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x0(%a6),%a0
+	lea		DATA+0x4(%a6),%a1
+
+	mov.w		&0xaaaa,(%a0)
+	mov.w		&0xbbbb,(%a1)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+	mov.w		&0xcccc,%d3
+	mov.w		&0xdddd,%d4
+
+	mov.w		&0x0014,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.w		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d5
+	mov.w		(%a1),%d6
+	mov.w		&0xcccc,IREGS+0x14+0x2(%a6)
+	mov.w		&0xdddd,IREGS+0x18+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas210:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+	lea		DATA+0x5(%a6),%a1
+
+	mov.w		&0xaaaa,(%a0)
+	mov.w		&0xbbbb,(%a1)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+	mov.w		&0xcccc,%d3
+	mov.w		&0xdddd,%d4
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.w		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d5
+	mov.w		(%a1),%d6
+	mov.w		&0xcccc,IREGS+0x14+0x2(%a6)
+	mov.w		&0xdddd,IREGS+0x18+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas211:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x0(%a6),%a0
+	lea		DATA+0x4(%a6),%a1
+
+	mov.w		&0xeeee,(%a0)
+	mov.w		&0xbbbb,(%a1)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+	mov.w		&0xcccc,%d3
+	mov.w		&0xdddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.w		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d5
+	mov.w		(%a1),%d6
+	mov.w		&0xeeee,IREGS+0x4+0x2(%a6)
+	mov.w		&0xbbbb,IREGS+0x8+0x2(%a6)
+	mov.w		&0xeeee,IREGS+0x14+0x2(%a6)
+	mov.w		&0xbbbb,IREGS+0x18+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas212:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+	lea		DATA+0x5(%a6),%a1
+
+	mov.w		&0xeeee,(%a0)
+	mov.w		&0xbbbb,(%a1)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+	mov.w		&0xcccc,%d3
+	mov.w		&0xdddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.w		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d5
+	mov.w		(%a1),%d6
+	mov.w		&0xeeee,IREGS+0x4+0x2(%a6)
+	mov.w		&0xbbbb,IREGS+0x8+0x2(%a6)
+	mov.w		&0xeeee,IREGS+0x14+0x2(%a6)
+	mov.w		&0xbbbb,IREGS+0x18+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas213:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x0(%a6),%a0
+	lea		DATA+0x4(%a6),%a1
+
+	mov.w		&0xaaaa,(%a0)
+	mov.w		&0xeeee,(%a1)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+	mov.w		&0xcccc,%d3
+	mov.w		&0xdddd,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.w		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d5
+	mov.w		(%a1),%d6
+	mov.w		&0xaaaa,IREGS+0x4+0x2(%a6)
+	mov.w		&0xeeee,IREGS+0x8+0x2(%a6)
+	mov.w		&0xaaaa,IREGS+0x14+0x2(%a6)
+	mov.w		&0xeeee,IREGS+0x18+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cas214:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	lea		DATA+0x1(%a6),%a0
+	lea		DATA+0x5(%a6),%a1
+
+	mov.w		&0xaaaa,(%a0)
+	mov.w		&0x7fff,(%a1)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0x8000,%d2
+	mov.w		&0xcccc,%d3
+	mov.w		&0xdddd,%d4
+
+	mov.w		&0x001b,ICCR(%a6)
+	mov.w		&0x0010,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cas2.w		%d1:%d2,%d3:%d4,(%a0):(%a1)	# Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+	mov.w		%cc,SCCR(%a6)
+	mov.w		(%a0),%d5
+	mov.w		(%a1),%d6
+	mov.w		&0xaaaa,IREGS+0x4+0x2(%a6)
+	mov.w		&0x7fff,IREGS+0x8+0x2(%a6)
+	mov.w		&0xaaaa,IREGS+0x14+0x2(%a6)
+	mov.w		&0x7fff,IREGS+0x18+0x2(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	mov.l		TESTCTR(%a6),%d1
+	clr.l		%d0
+	rts
+
+###########################################################
+
+cmp2_str:
+	string		"\tcmp2,chk2..."
+
+	align		0x4
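+# cmp2 compares a register against the lower:upper bound pair stored at
+# DATA(%a6): Z is expected when the value equals either bound, C when it
+# falls outside the range. chk2 performs the same comparison but traps
+# when out of bounds, so it is only exercised with in-range values here.
+# The groups below walk unsigned and signed bounds using both data and
+# address registers.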
+# unsigned - small,small
+cmp2_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0x2040,DATA(%a6)
+	mov.l		&0x11111120,%d1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0x2040,DATA(%a6)
+	mov.l		&0x00000040,%a1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0x2040,DATA(%a6)
+	mov.l		&0x11111130,%d1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	chk2.b		DATA(%a6),%d1
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_4:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0x2040,DATA(%a6)
+	mov.l		&0x00000010,%a1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_5:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0x2040,DATA(%a6)
+	mov.l		&0x11111150,%d1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_6:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0x2040,DATA(%a6)
+	mov.l		&0x00000090,%a1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+# unsigned - small,large
+cmp2_7:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x2000a000,DATA(%a6)
+	mov.l		&0x11112000,%d1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.w		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_8:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x2000a000,DATA(%a6)
+	mov.l		&0xffffa000,%a1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.w		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_9:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x2000a000,DATA(%a6)
+	mov.l		&0x11113000,%d1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	chk2.w		DATA(%a6),%d1
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_10:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x2000a000,DATA(%a6)
+	mov.l		&0xffff9000,%a1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.w		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_11:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x2000a000,DATA(%a6)
+	mov.l		&0x11111000,%d1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.w		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_12:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0x2000a000,DATA(%a6)
+	mov.l		&0xffffb000,%a1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.w		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+# unsigned - large,large
+cmp2_13:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xa0000000,DATA(%a6)
+	mov.l		&0xc0000000,DATA+0x4(%a6)
+	mov.l		&0xa0000000,%d1
+
+	mov.w		&0x000c,ICCR(%a6)
+	mov.w		&0x0008,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.l		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_14:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xa0000000,DATA(%a6)
+	mov.l		&0xc0000000,DATA+0x4(%a6)
+	mov.l		&0xc0000000,%a1
+
+	mov.w		&0x000c,ICCR(%a6)
+	mov.w		&0x0008,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.l		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_15:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xa0000000,DATA(%a6)
+	mov.l		&0xc0000000,DATA+0x4(%a6)
+	mov.l		&0xb0000000,%d1
+
+	mov.w		&0x0008,ICCR(%a6)
+	mov.w		&0x0008,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	chk2.l		DATA(%a6),%d1
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_16:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xa0000000,DATA(%a6)
+	mov.l		&0xc0000000,DATA+0x4(%a6)
+	mov.l		&0x10000000,%a1
+
+	mov.w		&0x0009,ICCR(%a6)
+	mov.w		&0x0008,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.l		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_17:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xa0000000,DATA(%a6)
+	mov.l		&0xc0000000,DATA+0x4(%a6)
+	mov.l		&0x90000000,%d1
+
+	mov.w		&0x0009,ICCR(%a6)
+	mov.w		&0x0008,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.l		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_18:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		&0xa0000000,DATA(%a6)
+	mov.l		&0xc0000000,DATA+0x4(%a6)
+	mov.l		&0xd0000000,%a1
+
+	mov.w		&0x0009,ICCR(%a6)
+	mov.w		&0x0008,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.l		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+# signed - negative,positive
+cmp2_19:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa040,DATA(%a6)
+	mov.l		&0x111111a0,%d1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_20:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa040,DATA(%a6)
+	mov.l		&0x00000040,%a1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	chk2.b		DATA(%a6),%a1
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_21:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa040,DATA(%a6)
+	mov.l		&0x111111b0,%d1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_22:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa040,DATA(%a6)
+	mov.l		&0x00000010,%a1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_23:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa040,DATA(%a6)
+	mov.l		&0x11111190,%d1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_24:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa040,DATA(%a6)
+	mov.l		&0x00000050,%a1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+# signed - negative,negative
+cmp2_25:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa0c0,DATA(%a6)
+	mov.l		&0x111111a0,%d1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_26:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa0c0,DATA(%a6)
+	mov.l		&0xffffffc0,%a1
+
+	mov.w		&0x0004,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_27:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa0c0,DATA(%a6)
+	mov.l		&0x111111b0,%d1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	chk2.b		DATA(%a6),%d1
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_28:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa0c0,DATA(%a6)
+	mov.l		&0x11111190,%a1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_29:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa0c0,DATA(%a6)
+	mov.l		&0x111111d0,%d1
+
+	mov.w		&0x0001,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%d1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+cmp2_30:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.w		&0xa0c0,DATA(%a6)
+	mov.l		&0x00000050,%a1
+
+	mov.w		&0x001b,ICCR(%a6)
+	mov.w		&0x001f,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	cmp2.b		%a1,DATA(%a6)
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	mov.l		TESTCTR(%a6),%d1
+	clr.l		%d0
+	rts
+
+###########################################################
+
+ea_str:
+	string		"\tEffective addresses..."
+
+	align		0x4
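+# The effective-address tests run one known multiply (2 * 2, expecting
+# the product 4 in %d3) through the addressing modes: register indirect,
+# postincrement, predecrement, displaced, indexed with every scale and
+# index width, PC-relative, and immediate, cycling the base and index
+# registers from test to test.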
+ea_0:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a0),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_1:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a0)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a0
+	mov.l		%a0,IREGS+0x20(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_2:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a6),%a0
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a0),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a0
+	mov.l		%a0,IREGS+0x20(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_3:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x1000(%a6),%a0
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(-0x1000,%a0),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_4:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a0
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a0),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
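+# ea_5 and ea_6 would cover the absolute short and absolute long
+# addressing modes, but their bodies are commented out (presumably
+# because EAMEM has no fixed absolute address in this setup); the
+# counter increment is kept so the test numbering stays contiguous.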
+ea_5:
+	addq.l		&0x1,TESTCTR(%a6)
+
+#	movm.l		DEF_REGS(%pc),&0x3fff
+
+#	clr.l		%d2
+#	mov.l		&0x00000002,%d3
+
+#	mov.w		&0x0000,ICCR(%a6)
+#	mov.w		&0x0000,%cc
+#	movm.l		&0xffff,IREGS(%a6)
+
+#	mulu.l		EAMEM.w,%d2:%d3
+
+#	mov.w		%cc,SCCR(%a6)
+#	movm.l		&0xffff,SREGS(%a6)
+#	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+#	bsr.l		chkregs
+#	tst.b		%d0
+#	bne.l		error
+
+ea_6:
+	addq.l		&0x1,TESTCTR(%a6)
+
+#	movm.l		DEF_REGS(%pc),&0x3fff
+
+#	clr.l		%d2
+#	mov.l		&0x00000002,%d3
+
+#	mov.w		&0x0000,ICCR(%a6)
+#	mov.w		&0x0000,%cc
+#	movm.l		&0xffff,IREGS(%a6)
+
+#	mulu.l		EAMEM.l,%d2:%d3
+
+#	mov.w		%cc,SCCR(%a6)
+#	movm.l		&0xffff,SREGS(%a6)
+#	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+#	bsr.l		chkregs
+#	tst.b		%d0
+#	bne.l		error
+
+ea_7:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		&0x00000002,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
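+# The PC-relative tests branch over an inline data word and then reach
+# it back through the various %pc-relative forms.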
+ea_8:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_8_next
+ea_8_mem:
+	long		0x00000002
+ea_8_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_8_mem.w,%pc),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_9:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a6),%a1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a0
+	mov.l		%a0,IREGS+0x24(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_10:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a6),%a2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a0
+	mov.l		%a0,IREGS+0x28(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_11:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a6),%a3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a3),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a0
+	mov.l		%a0,IREGS+0x2c(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_12:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a6),%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a0
+	mov.l		%a0,IREGS+0x30(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_13:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a6),%a5
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a5),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a0
+	mov.l		%a0,IREGS+0x34(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
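+# ea_14 uses the frame pointer %a6 itself as the operand base, parking
+# the frame in %a1 for the duration; ea_15 does the same with the stack
+# pointer, parking %a7 in %a0.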
+ea_14:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x4(%a1),%a6
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		-(%a6),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+	lea		EAMEM(%a1),%a0
+	mov.l		%a0,IREGS+0x38(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_15:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	mov.l		%a7,%a0
+	lea		EAMEM+0x4(%a6),%a7
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		-(%a7),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM(%a6),%a1
+	mov.l		%a1,IREGS+0x3c(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_16:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_17:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.w*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_18:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.w*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_19:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.w*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_20:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.l*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_21:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.l*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_22:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.l*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_23:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%d4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_24:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a0,%a4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_25:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(-0x10.b,%a0,%a4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
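+# ea_26 through ea_32 repeat the indexed form while cycling the base
+# register through %a1-%a7 (with %a6 and %a7 parked as above).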
+ea_26:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a1
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a1,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_27:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a2
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a2,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_28:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a3,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_29:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a4
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a4,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_30:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a5
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a5,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_31:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a1),%a6
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		(0x10.b,%a6,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_32:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	mov.l		%a7,%a0
+	lea		EAMEM(%a6),%a7
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.b,%a7,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_33:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_34:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_35:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a3),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_36:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_37:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a5
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a5),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_38:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a1),%a6
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		(%a6),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_39:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	mov.l		%a7,%a0
+	lea		EAMEM(%a6),%a7
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a7),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_40:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a1)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a0
+	mov.l		%a0,IREGS+0x24(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_41:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a2)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a0
+	mov.l		%a0,IREGS+0x28(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_42:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a3)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a0
+	mov.l		%a0,IREGS+0x2c(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_43:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a4)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a0
+	mov.l		%a0,IREGS+0x30(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_44:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a5
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a5)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a0
+	mov.l		%a0,IREGS+0x34(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_45:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a1),%a6
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		(%a6)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+	lea		EAMEM+0x4(%a1),%a0
+	mov.l		%a0,IREGS+0x38(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_46:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	mov.l		%a7,%a0
+	lea		EAMEM(%a6),%a7
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(%a7)+,%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+	lea		EAMEM+0x4(%a6),%a1
+	mov.l		%a1,IREGS+0x3c(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_47:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a1
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_48:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a2
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_49:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a3),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_50:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_51:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a5
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a5),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_52:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a1),%a6
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		(0x1000,%a6),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_53:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	mov.l		%a7,%a0
+	lea		EAMEM-0x1000(%a6),%a7
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x1000,%a7),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_54:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x1000(%a6),%a0
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(-0x1000,%a0),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_55:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_55_next
+
+ea_55_data:
+	long		0x00000002
+ea_55_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_55_data.w,%pc),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
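+# ea_56 onward switch from the brief extension format (.b displacement)
+# to the full extension-word format with word and long displacements.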
+ea_56:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_57:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.w*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_58:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.w*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_59:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.w*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_60:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.l*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_61:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.l*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_62:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.l*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_63:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x10.w,%a3,%d4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_64:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(-0x10.w,%a3,%a4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_65:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(0x00.w,%a3,%za4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_66:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		%a3,%a4
+	add.l		&0x10,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(-0x10.w,%za3,%a4.l*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_67:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(-0x10.l,%a3,%a4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_68:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_68_next
+ea_68_mem:
+	long		0x00000002
+ea_68_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_68_mem+0x10.w,%pc,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_69:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_69_next
+ea_69_mem:
+	long		0x00000002
+ea_69_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_69_mem+0x10.w,%pc,%d4.w*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_70:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_70_next
+ea_70_mem:
+	long		0x00000002
+ea_70_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_70_mem+0x10.w,%pc,%d4.w*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_71:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_71_next
+ea_71_mem:
+	long		0x00000002
+ea_71_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_71_mem+0x10.w,%pc,%d4.w*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_72:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_72_next
+ea_72_mem:
+	long		0x00000002
+ea_72_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_72_mem+0x10.w,%pc,%d4.l*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_73:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_73_next
+ea_73_mem:
+	long		0x00000002
+ea_73_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_73_mem+0x10.w,%pc,%d4.l*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_74:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_74_next
+ea_74_mem:
+	long		0x00000002
+ea_74_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_74_mem+0x10.w,%pc,%d4.l*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_75:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_75_next
+ea_75_mem:
+	long		0x00000002
+ea_75_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0x7fff,IREGS(%a6)
+
+	mulu.l		(ea_75_mem+0x10.w,%pc,%d4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0x7fff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_76:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_76_next
+ea_76_mem:
+	long		0x00000002
+ea_76_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&-0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_76_mem+0x10.w,%pc,%a4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_77:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_77_next
+ea_77_mem:
+	long		0x00000002
+ea_77_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a3
+	mov.l		&0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_77_mem+0x00.w,%pc,%za4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
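+# ea_78 (the suppressed-PC "%zpc" indexed form) is likewise disabled,
+# with only the counter increment retained.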
+ea_78:
+	addq.l		&0x1,TESTCTR(%a6)
+
+#	movm.l		DEF_REGS(%pc),&0x3fff
+
+#	clr.l		%d2
+#	mov.l		&0x00000002,%d3
+#	lea		EAMEM,%a3
+#	mov.l		%a3,%a4
+#	add.l		&0x10,%a4
+
+#	mov.w		&0x0000,ICCR(%a6)
+#	mov.w		&0x0000,%cc
+#	movm.l		&0xffff,IREGS(%a6)
+
+#	mulu.l		(EAMEM-0x10.w,%zpc,%a4.l*1),%d2:%d3
+
+#	mov.w		%cc,SCCR(%a6)
+#	movm.l		&0xffff,SREGS(%a6)
+#	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+#	bsr.l		chkregs
+#	tst.b		%d0
+#	bne.l		error
+
+ea_79:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM,%a3
+	mov.l		&0x2,%a4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_79_mem-0x10.l,%pc,%a4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bra.b		ea_79_next
+ea_79_mem:
+	long		0x00000002
+ea_79_next:
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_80:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_80_next
+ea_80_mem:
+	long		0x00000002
+ea_80_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a1
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_80_mem+0x10.b,%pc,%d4.w*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_81:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_81_next
+ea_81_mem:
+	long		0x00000002
+ea_81_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_81_mem+0x10.b,%pc,%d4.w*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_82:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_82_next
+ea_82_mem:
+	long		0x00000002
+ea_82_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_82_mem+0x10.b,%pc,%d4.w*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_83:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_83_next
+ea_83_mem:
+	long		0x00000002
+ea_83_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_83_mem+0x10.b,%pc,%d4.w*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_84:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_84_next
+ea_84_mem:
+	long		0x00000002
+ea_84_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_84_mem+0x10.b,%pc,%d4.l*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_85:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_85_next
+ea_85_mem:
+	long		0x00000002
+ea_85_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_85_mem+0x10.b,%pc,%d4.l*2),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_86:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_86_next
+ea_86_mem:
+	long		0x00000002
+ea_86_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_86_mem+0x10.b,%pc,%d4.l*4),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_87:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	bra.b		ea_87_next
+ea_87_mem:
+	long		0x00000002
+ea_87_next:
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_87_mem+0x10.b,%pc,%d4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_88:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a6),%a0
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		(ea_88_mem+0x10.b,%pc,%d4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bra.b		ea_88_next
+ea_88_mem:
+	long		0x00000002
+ea_88_next:
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
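+# From ea_89 onward the cases exercise the memory-indirect pre-indexed
+# modes, ([bd,An,Xn.size*scale],od): EASTORE holds a pointer to
+# EAMEM-0x1000, the bracketed part resolves to that pointer, and the
+# 0x1000 outer displacement lands the final <ea> back on EAMEM.
+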
+ea_89:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.w*1],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_90:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.w*2],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_91:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.w*4],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_92:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.w*8],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_93:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.l*1],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_94:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.l*2],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_95:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.l*4],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_96:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4,%d4.l*8],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_97:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.l,%a4,%d4.l*8],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_98:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x00.l,%a4,%zd4.l*8],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_99:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([%a4,%zd4.l*8],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_100:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+	add.l		%a4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.l,%za4,%d4.l*1],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_101:
+	addq.l		&0x1,TESTCTR(%a6)
+
+#	movm.l		DEF_REGS(%pc),&0x3fff
+
+#	clr.l		%d2
+#	mov.l		&0x00000002,%d3
+#	lea		EAMEM(%a6),%a3
+#	lea		EASTORE(%a6),%a4
+#	mov.l		%a3,(%a4)
+#	mov.l		&-0x10,%d4
+
+#	mov.w		&0x0000,ICCR(%a6)
+#	mov.w		&0x0000,%cc
+#	movm.l		&0xffff,IREGS(%a6)
+
+#	mulu.l		([EASTORE.l,%za4,%zd4.l*1]),%d2:%d3
+
+#	mov.w		%cc,SCCR(%a6)
+#	movm.l		&0xffff,SREGS(%a6)
+#	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+#	bsr.l		chkregs
+#	tst.b		%d0
+#	bne.l		error
+
+ea_102:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x1000(%a1),%a3
+	lea		EASTORE(%a1),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%a6
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		([0x10.w,%a4,%a6.l*8],-0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_103:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x1000(%a1),%a3
+	lea		EASTORE(%a1),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&0x2,%a6
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		([-0x10.w,%a4,%a6.l*8],-0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_104:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.w*1,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_105:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.w*2,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_106:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.w*4,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_107:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.w*8,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_108:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.l*1,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_109:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.w*2,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_110:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.l*4,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_111:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.w,%a4],%d4.l*8,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_112:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.l,%a4],%d4.l*8,0x10.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_113:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x00.l,%a4],%zd4.l*8,0x20.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_114:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a7,%a0
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%a6),%a3
+	lea		EASTORE(%a6),%a7
+	mov.l		%a3,(%a7)
+	mov.l		&0x20,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([%a7],%d4.l*1),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_115:
+	addq.l		&0x1,TESTCTR(%a6)
+
+#	movm.l		DEF_REGS(%pc),&0x3fff
+
+#	clr.l		%d2
+#	mov.l		&0x00000002,%d3
+#	lea		EAMEM-0x20(%pc),%a3
+#	lea		EASTORE(%pc),%a4
+#	mov.l		%a3,(%a4)
+#	mov.l		&0x2,%d4
+
+#	mov.w		&0x0000,ICCR(%a6)
+#	mov.w		&0x0000,%cc
+#	movm.l		&0xffff,IREGS(%a6)
+
+#	mulu.l		([EASTORE.l,%za4],%zd4.l*8,0x20.l),%d2:%d3
+
+#	mov.w		%cc,SCCR(%a6)
+#	movm.l		&0xffff,SREGS(%a6)
+#	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+#	bsr.l		chkregs
+#	tst.b		%d0
+#	bne.l		error
+
+ea_116:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a6,%a1
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%a1),%a3
+	lea		EASTORE(%a1),%a6
+	mov.l		%a3,(%a6)
+	add.l		&0x10,%a6
+	mov.l		&-0x2,%a5
+
+	mov.w		&0x0000,ICCR(%a1)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a1)
+
+	mulu.l		([-0x10.w,%a6],%a5.l*8,0x10.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a1)
+	movm.l		&0xffff,SREGS(%a1)
+	mov.l		&0x00000004,IREGS+0xc(%a1)
+
+	mov.l		%a1,%a6
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	mov.l		TESTCTR(%a6),%d1
+	clr.l		%d0
+	rts
+
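+# The cases from ea_117 onward repeat the memory-indirect tests using
+# PC-relative (and suppressed-PC) base addresses instead of an address
+# register base.
+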
+ea_117:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.w*1],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_118:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.w*2],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_119:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.w*4],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_120:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.w*8],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_121:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.l*1],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_122:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.l*2],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_123:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.l*4],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_124:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x10.w,%pc,%d4.l*8],0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_125:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+	mulu.l		([EASTORE+0x10.l,%pc,%d4.l*8],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_126:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE+0x00.l,%pc,%zd4.l*8],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_127:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		%a4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([%zpc,%d4.l*1],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_128:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+	add.l		%a4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([0x10.l,%zpc,%d4.l*1],0x1000.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_129:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&-0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.l,%zpc,%zd4.l*1]),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_130:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&0x2,%a6
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE-0x10.w,%pc,%a6.l*8],-0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_131:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a7,%a0
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM+0x1000(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&0x2,%a7
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE-0x10.w,%pc,%a7.l*8],-0x1000.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_132:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.w*1,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_133:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.w*2,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_134:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.w*4,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_135:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.w*8,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_136:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x10,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.l*1,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_137:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x8,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.w*2,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_138:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.l*4,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_139:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%d4.l*8,0x10.w),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_140:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	sub.l		&0x10,%a4
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.l,%pc],%d4.l*8,0x10.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_141:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&0x2,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.l,%pc],%zd4.l*8,0x20.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_142:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM-0x20(%pc),%a3
+	lea		EASTORE(%pc),%a4
+	mov.l		%a3,(%a4)
+	mov.l		&0x4,%d4
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.l,%zpc],%d4.l*8),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+ea_143:
+	addq.l		&0x1,TESTCTR(%a6)
+
+	movm.l		DEF_REGS(%pc),&0x3fff
+
+	mov.l		%a7,%a0
+	clr.l		%d2
+	mov.l		&0x00000002,%d3
+	lea		EAMEM(%pc),%a3
+	lea		EASTORE(%pc),%a6
+	mov.l		%a3,(%a6)
+	add.l		&0x10,%a6
+	mov.l		&-0x2,%a7
+
+	mov.w		&0x0000,ICCR(%a6)
+	mov.w		&0x0000,%cc
+	movm.l		&0xffff,IREGS(%a6)
+
+	mulu.l		([EASTORE.w,%pc],%a7.l*8,0x10.l),%d2:%d3
+
+	mov.w		%cc,SCCR(%a6)
+	movm.l		&0xffff,SREGS(%a6)
+	mov.l		&0x00000004,IREGS+0xc(%a6)
+
+	mov.l		%a0,%a7
+	bsr.l		chkregs
+	tst.b		%d0
+	bne.l		error
+
+	clr.l		%d0
+	rts
+
+###########################################################
+###########################################################
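+# chkregs:
+#	Compare the pre-instruction register image (IREGS) against the
+#	post-instruction image (SREGS) a longword at a time, then compare
+#	the saved condition-code snapshots (ICCR vs. SCCR). Returns d0=0
+#	on a match and d0=1 on any mismatch.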
+chkregs:
+	lea		IREGS(%a6),%a0
+	lea		SREGS(%a6),%a1
+	mov.l		&14,%d0
+chkregs_loop:
+	cmp.l		(%a0)+,(%a1)+
+	bne.l		chkregs_error
+	dbra.w		%d0,chkregs_loop
+
+	mov.w		ICCR(%a6),%d0
+	mov.w		SCCR(%a6),%d1
+	cmp.w		%d0,%d1
+	bne.l		chkregs_error
+
+	clr.l		%d0
+	rts
+
+chkregs_error:
+	movq.l		&0x1,%d0
+	rts
+
+error:
+	mov.l		TESTCTR(%a6),%d1
+	movq.l		&0x1,%d0
+	rts
+
+DEF_REGS:
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+	long		0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+
+############################################################
+
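+# _print_str/_print_num:
+#	Callout stubs: each fetches a relative offset from the table the
+#	host environment is expected to lay down at TESTTOP-0x80 (TESTTOP
+#	is defined elsewhere), computes the handler address PC-relatively,
+#	and "returns" into it with rtd, which also pops the saved d0 slot.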
+_print_str:
+	mov.l		%d0,-(%sp)
+	mov.l		(TESTTOP-0x80+0x0,%pc),%d0
+	pea		(TESTTOP-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+_print_num:
+	mov.l		%d0,-(%sp)
+	mov.l		(TESTTOP-0x80+0x4,%pc),%d0
+	pea		(TESTTOP-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+############################################################
diff --git a/arch/m68k/ifpsp060/src/pfpsp.S b/arch/m68k/ifpsp060/src/pfpsp.S
new file mode 100644
index 0000000..0c997c4
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/pfpsp.S
@@ -0,0 +1,14745 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# freal.s:
+#	This file is appended to the top of the 060FPSP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located
+# after _060FPSP_TABLE.
+#	Also, subroutine stubs exist in this file (_fpsp_done for
+# example) that are referenced by the FPSP package itself in order
+# to call a given routine. The stub routine actually performs the
+# callout. The FPSP code does a "bsr" to the stub routine. This
+# extra layer of hierarchy adds a slight performance penalty but
+# it makes the FPSP code easier to read and more maintainable.
+#
+
+set	_off_bsun,	0x00
+set	_off_snan,	0x04
+set	_off_operr,	0x08
+set	_off_ovfl,	0x0c
+set	_off_unfl,	0x10
+set	_off_dz,	0x14
+set	_off_inex,	0x18
+set	_off_fline,	0x1c
+set	_off_fpu_dis,	0x20
+set	_off_trap,	0x24
+set	_off_trace,	0x28
+set	_off_access,	0x2c
+set	_off_done,	0x30
+
+set	_off_imr,	0x40
+set	_off_dmr,	0x44
+set	_off_dmw,	0x48
+set	_off_irw,	0x4c
+set	_off_irl,	0x50
+set	_off_drb,	0x54
+set	_off_drw,	0x58
+set	_off_drl,	0x5c
+set	_off_dwb,	0x60
+set	_off_dww,	0x64
+set	_off_dwl,	0x68
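+
+# A minimal sketch of the glue an OS supplies (illustrative only; the
+# label names below are hypothetical, not part of this package): a
+# 0x80-byte block of longword offsets is placed immediately before
+# _060FPSP_TABLE, one slot per _off_* constant above, each offset
+# measured from the start of that block:
+#
+#	_my_callout_table:				# == _060FPSP_TABLE-0x80
+#		long	_my_bsun - _my_callout_table	# _off_bsun (0x00)
+#		long	_my_snan - _my_callout_table	# _off_snan (0x04)
+#		...					# one slot per offset
+#
+# The stubs further below (e.g. _fpsp_done) fetch the slot, add the
+# block base PC-relatively, and jump through it.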
+
+_060FPSP_TABLE:
+
+###############################################################
+
+# Here's the table of ENTRY POINTS for those linking the package.
+	bra.l		_fpsp_snan
+	short		0x0000
+	bra.l		_fpsp_operr
+	short		0x0000
+	bra.l		_fpsp_ovfl
+	short		0x0000
+	bra.l		_fpsp_unfl
+	short		0x0000
+	bra.l		_fpsp_dz
+	short		0x0000
+	bra.l		_fpsp_inex
+	short		0x0000
+	bra.l		_fpsp_fline
+	short		0x0000
+	bra.l		_fpsp_unsupp
+	short		0x0000
+	bra.l		_fpsp_effadd
+	short		0x0000
+
+	space		56
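+
+# Each entry above occupies 8 bytes (a 6-byte bra.l padded by a 2-byte
+# short), so entry n sits at _060FPSP_TABLE + 8*n; the trailing
+# "space 56" pads the entry area out to 0x80 bytes.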
+
+###############################################################
+	global		_fpsp_done
+_fpsp_done:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_done,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
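+
+# (This stub, like all of the ones that follow, saves d0, fetches the
+# callout's relative offset from the table at _060FPSP_TABLE-0x80,
+# pushes the resulting absolute address with pea, restores d0 from the
+# slot now at 4(%sp), and uses "rtd &0x4" to pop that slot and transfer
+# control to the callout in one step.)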
+
+	global		_real_ovfl
+_real_ovfl:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_ovfl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_unfl
+_real_unfl:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_unfl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_inex
+_real_inex:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_inex,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_bsun
+_real_bsun:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_bsun,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_operr
+_real_operr:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_operr,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_snan
+_real_snan:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_snan,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_dz
+_real_dz:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dz,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_fline
+_real_fline:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_fline,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_fpu_disabled
+_real_fpu_disabled:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_fpu_dis,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_trap
+_real_trap:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_trap,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_trace
+_real_trace:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_trace,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_real_access
+_real_access:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_access,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#######################################
+
+	global		_imem_read
+_imem_read:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_imr,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read
+_dmem_read:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dmr,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write
+_dmem_write:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dmw,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_imem_read_word
+_imem_read_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_irw,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_imem_read_long
+_imem_read_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_irl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_byte
+_dmem_read_byte:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_drb,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_word
+_dmem_read_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_drw,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_read_long
+_dmem_read_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_drl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_byte
+_dmem_write_byte:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dwb,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_word
+_dmem_write_word:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dww,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+	global		_dmem_write_long
+_dmem_write_long:
+	mov.l		%d0,-(%sp)
+	mov.l		(_060FPSP_TABLE-0x80+_off_dwl,%pc),%d0
+	pea.l		(_060FPSP_TABLE-0x80,%pc,%d0)
+	mov.l		0x4(%sp),%d0
+	rtd		&0x4
+
+#
+# This file contains a set of define statements for constants
+# in order to promote readability within the core code itself.
+#
+
+set LOCAL_SIZE,		192			# stack frame size(bytes)
+set LV,			-LOCAL_SIZE		# stack offset
+
+set EXC_SR,		0x4			# stack status register
+set EXC_PC,		0x6			# stack pc
+set EXC_VOFF,		0xa			# stacked vector offset
+set EXC_EA,		0xc			# stacked <ea>
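+
+# (After "link.w %a6,&-LOCAL_SIZE" in each handler, these offsets from
+# %a6 reach into the processor's exception stack frame: the status
+# register at 0x4, the PC at 0x6, the format/vector word at 0xa, and,
+# for frame formats that include one, the effective address at 0xc.)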
+
+set EXC_FP,		0x0			# frame pointer
+
+set EXC_AREGS,		-68			# offset of all address regs
+set EXC_DREGS,		-100			# offset of all data regs
+set EXC_FPREGS,		-36			# offset of all fp regs
+
+set EXC_A7,		EXC_AREGS+(7*4)		# offset of saved a7
+set OLD_A7,		EXC_AREGS+(6*4)		# extra copy of saved a7
+set EXC_A6,		EXC_AREGS+(6*4)		# offset of saved a6
+set EXC_A5,		EXC_AREGS+(5*4)
+set EXC_A4,		EXC_AREGS+(4*4)
+set EXC_A3,		EXC_AREGS+(3*4)
+set EXC_A2,		EXC_AREGS+(2*4)
+set EXC_A1,		EXC_AREGS+(1*4)
+set EXC_A0,		EXC_AREGS+(0*4)
+set EXC_D7,		EXC_DREGS+(7*4)
+set EXC_D6,		EXC_DREGS+(6*4)
+set EXC_D5,		EXC_DREGS+(5*4)
+set EXC_D4,		EXC_DREGS+(4*4)
+set EXC_D3,		EXC_DREGS+(3*4)
+set EXC_D2,		EXC_DREGS+(2*4)
+set EXC_D1,		EXC_DREGS+(1*4)
+set EXC_D0,		EXC_DREGS+(0*4)
+
+set EXC_FP0,		EXC_FPREGS+(0*12)	# offset of saved fp0
+set EXC_FP1,		EXC_FPREGS+(1*12)	# offset of saved fp1
+set EXC_FP2,		EXC_FPREGS+(2*12)	# offset of saved fp2 (not used)
+
+set FP_SCR1,		LV+80			# fp scratch 1
+set FP_SCR1_EX,		FP_SCR1+0
+set FP_SCR1_SGN,	FP_SCR1+2
+set FP_SCR1_HI,		FP_SCR1+4
+set FP_SCR1_LO,		FP_SCR1+8
+
+set FP_SCR0,		LV+68			# fp scratch 0
+set FP_SCR0_EX,		FP_SCR0+0
+set FP_SCR0_SGN,	FP_SCR0+2
+set FP_SCR0_HI,		FP_SCR0+4
+set FP_SCR0_LO,		FP_SCR0+8
+
+set FP_DST,		LV+56			# fp destination operand
+set FP_DST_EX,		FP_DST+0
+set FP_DST_SGN,		FP_DST+2
+set FP_DST_HI,		FP_DST+4
+set FP_DST_LO,		FP_DST+8
+
+set FP_SRC,		LV+44			# fp source operand
+set FP_SRC_EX,		FP_SRC+0
+set FP_SRC_SGN,		FP_SRC+2
+set FP_SRC_HI,		FP_SRC+4
+set FP_SRC_LO,		FP_SRC+8
+
+set USER_FPIAR,		LV+40			# FP instr address register
+
+set USER_FPSR,		LV+36			# FP status register
+set FPSR_CC,		USER_FPSR+0		# FPSR condition codes
+set FPSR_QBYTE,		USER_FPSR+1		# FPSR quotient byte
+set FPSR_EXCEPT,	USER_FPSR+2		# FPSR exception status byte
+set FPSR_AEXCEPT,	USER_FPSR+3		# FPSR accrued exception byte
+
+set USER_FPCR,		LV+32			# FP control register
+set FPCR_ENABLE,	USER_FPCR+2		# FPCR exception enable
+set FPCR_MODE,		USER_FPCR+3		# FPCR rounding mode control
+
+set L_SCR3,		LV+28			# integer scratch 3
+set L_SCR2,		LV+24			# integer scratch 2
+set L_SCR1,		LV+20			# integer scratch 1
+
+set STORE_FLG,		LV+19			# flag: operand store (i.e. not fcmp/ftst)
+
+set EXC_TEMP2,		LV+24			# temporary space
+set EXC_TEMP,		LV+16			# temporary space
+
+set DTAG,		LV+15			# destination operand type
+set STAG,		LV+14			# source operand type
+
+set SPCOND_FLG,		LV+10			# flag: special case (see below)
+
+set EXC_CC,		LV+8			# saved condition codes
+set EXC_EXTWPTR,	LV+4			# saved current PC (active)
+set EXC_EXTWORD,	LV+2			# saved extension word
+set EXC_CMDREG,		LV+2			# saved extension word
+set EXC_OPWORD,		LV+0			# saved operation word
+
+################################
+
+# Helpful macros
+
+set FTEMP,		0			# offsets within an
+set FTEMP_EX,		0			# extended precision
+set FTEMP_SGN,		2			# value saved in memory.
+set FTEMP_HI,		4
+set FTEMP_LO,		8
+set FTEMP_GRS,		12
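+
+# (A value in this unpacked format occupies 16 bytes: the 16-bit
+# sign/exponent word at offset 0, a separately saved sign at offset 2,
+# the upper and lower 32 bits of the mantissa at offsets 4 and 8, and
+# the guard/round/sticky longword at offset 12.)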
+
+set LOCAL,		0			# offsets within an
+set LOCAL_EX,		0			# extended precision
+set LOCAL_SGN,		2			# value saved in memory.
+set LOCAL_HI,		4
+set LOCAL_LO,		8
+set LOCAL_GRS,		12
+
+set DST,		0			# offsets within an
+set DST_EX,		0			# extended precision
+set DST_HI,		4			# value saved in memory.
+set DST_LO,		8
+
+set SRC,		0			# offsets within an
+set SRC_EX,		0			# extended precision
+set SRC_HI,		4			# value saved in memory.
+set SRC_LO,		8
+
+set SGL_LO,		0x3f81			# min sgl prec exponent
+set SGL_HI,		0x407e			# max sgl prec exponent
+set DBL_LO,		0x3c01			# min dbl prec exponent
+set DBL_HI,		0x43fe			# max dbl prec exponent
+set EXT_LO,		0x0			# min ext prec exponent
+set EXT_HI,		0x7ffe			# max ext prec exponent
+
+set EXT_BIAS,		0x3fff			# extended precision bias
+set SGL_BIAS,		0x007f			# single precision bias
+set DBL_BIAS,		0x03ff			# double precision bias
+
+set NORM,		0x00			# operand type for STAG/DTAG
+set ZERO,		0x01			# operand type for STAG/DTAG
+set INF,		0x02			# operand type for STAG/DTAG
+set QNAN,		0x03			# operand type for STAG/DTAG
+set DENORM,		0x04			# operand type for STAG/DTAG
+set SNAN,		0x05			# operand type for STAG/DTAG
+set UNNORM,		0x06			# operand type for STAG/DTAG
+
+##################
+# FPSR/FPCR bits #
+##################
+set neg_bit,		0x3			# negative result
+set z_bit,		0x2			# zero result
+set inf_bit,		0x1			# infinite result
+set nan_bit,		0x0			# NAN result
+
+set q_sn_bit,		0x7			# sign bit of quotient byte
+
+set bsun_bit,		7			# branch on unordered
+set snan_bit,		6			# signalling NAN
+set operr_bit,		5			# operand error
+set ovfl_bit,		4			# overflow
+set unfl_bit,		3			# underflow
+set dz_bit,		2			# divide by zero
+set inex2_bit,		1			# inexact result 2
+set inex1_bit,		0			# inexact result 1
+
+set aiop_bit,		7			# accrued inexact operation bit
+set aovfl_bit,		6			# accrued overflow bit
+set aunfl_bit,		5			# accrued underflow bit
+set adz_bit,		4			# accrued dz bit
+set ainex_bit,		3			# accrued inexact bit
+
+#############################
+# FPSR individual bit masks #
+#############################
+set neg_mask,		0x08000000		# negative bit mask (lw)
+set inf_mask,		0x02000000		# infinity bit mask (lw)
+set z_mask,		0x04000000		# zero bit mask (lw)
+set nan_mask,		0x01000000		# nan bit mask (lw)
+
+set neg_bmask,		0x08			# negative bit mask (byte)
+set inf_bmask,		0x02			# infinity bit mask (byte)
+set z_bmask,		0x04			# zero bit mask (byte)
+set nan_bmask,		0x01			# nan bit mask (byte)
+
+set bsun_mask,		0x00008000		# bsun exception mask
+set snan_mask,		0x00004000		# snan exception mask
+set operr_mask,		0x00002000		# operr exception mask
+set ovfl_mask,		0x00001000		# overflow exception mask
+set unfl_mask,		0x00000800		# underflow exception mask
+set dz_mask,		0x00000400		# dz exception mask
+set inex2_mask,		0x00000200		# inex2 exception mask
+set inex1_mask,		0x00000100		# inex1 exception mask
+
+set aiop_mask,		0x00000080		# accrued illegal operation
+set aovfl_mask,		0x00000040		# accrued overflow
+set aunfl_mask,		0x00000020		# accrued underflow
+set adz_mask,		0x00000010		# accrued divide by zero
+set ainex_mask,		0x00000008		# accrued inexact
+
+######################################
+# FPSR combinations used in the FPSP #
+######################################
+set dzinf_mask,		inf_mask+dz_mask+adz_mask
+set opnan_mask,		nan_mask+operr_mask+aiop_mask
+set nzi_mask,		0x01ffffff		# clears N, Z, and I
+set unfinx_mask,	unfl_mask+inex2_mask+aunfl_mask+ainex_mask
+set unf2inx_mask,	unfl_mask+inex2_mask+ainex_mask
+set ovfinx_mask,	ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
+set inx1a_mask,		inex1_mask+ainex_mask
+set inx2a_mask,		inex2_mask+ainex_mask
+set snaniop_mask,	nan_mask+snan_mask+aiop_mask
+set snaniop2_mask,	snan_mask+aiop_mask
+set naniop_mask,	nan_mask+aiop_mask
+set neginf_mask,	neg_mask+inf_mask
+set infaiop_mask,	inf_mask+aiop_mask
+set negz_mask,		neg_mask+z_mask
+set opaop_mask,		operr_mask+aiop_mask
+set unfl_inx_mask,	unfl_mask+aunfl_mask+ainex_mask
+set ovfl_inx_mask,	ovfl_mask+aovfl_mask+ainex_mask
+
+#########
+# misc. #
+#########
+set rnd_stky_bit,	29			# stky bit pos in longword
+
+set sign_bit,		0x7			# sign bit
+set signan_bit,		0x6			# signalling nan bit
+
+set sgl_thresh,		0x3f81			# minimum sgl exponent
+set dbl_thresh,		0x3c01			# minimum dbl exponent
+
+set x_mode,		0x0			# extended precision
+set s_mode,		0x4			# single precision
+set d_mode,		0x8			# double precision
+
+set rn_mode,		0x0			# round-to-nearest
+set rz_mode,		0x1			# round-to-zero
+set rm_mode,		0x2			# round-to-minus-infinity
+set rp_mode,		0x3			# round-to-plus-infinity
+
+set mantissalen,	64			# length of mantissa in bits
+
+set BYTE,		1			# len(byte) == 1 byte
+set WORD,		2			# len(word) == 2 bytes
+set LONG,		4			# len(longword) == 4 bytes
+
+set BSUN_VEC,		0xc0			# bsun    vector offset
+set INEX_VEC,		0xc4			# inexact vector offset
+set DZ_VEC,		0xc8			# dz      vector offset
+set UNFL_VEC,		0xcc			# unfl    vector offset
+set OPERR_VEC,		0xd0			# operr   vector offset
+set OVFL_VEC,		0xd4			# ovfl    vector offset
+set SNAN_VEC,		0xd8			# snan    vector offset
+
+###########################
+# SPecial CONDition FLaGs #
+###########################
+set ftrapcc_flg,	0x01			# flag bit: ftrapcc exception
+set fbsun_flg,		0x02			# flag bit: bsun exception
+set mia7_flg,		0x04			# flag bit: (a7)+ <ea>
+set mda7_flg,		0x08			# flag bit: -(a7) <ea>
+set fmovm_flg,		0x40			# flag bit: fmovm instruction
+set immed_flg,		0x80			# flag bit: &<data> <ea>
+
+set ftrapcc_bit,	0x0
+set fbsun_bit,		0x1
+set mia7_bit,		0x2
+set mda7_bit,		0x3
+set immed_bit,		0x7
+
+##################################
+# TRANSCENDENTAL "LAST-OP" FLAGS #
+##################################
+set FMUL_OP,		0x0			# fmul instr performed last
+set FDIV_OP,		0x1			# fdiv performed last
+set FADD_OP,		0x2			# fadd performed last
+set FMOV_OP,		0x3			# fmov performed last
+
+#############
+# CONSTANTS #
+#############
+T1:	long		0x40C62D38,0xD3D64634	# 16381 LOG2 LEAD
+T2:	long		0x3D6F90AE,0xB1E75CC7	# 16381 LOG2 TRAIL
+
+PI:	long		0x40000000,0xC90FDAA2,0x2168C235,0x00000000
+PIBY2:	long		0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+TWOBYPI:
+	long		0x3FE45F30,0x6DC9C883
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_ovfl(): 060FPSP entry point for FP Overflow exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Overflow exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_fpsp_done() - "callout" for 060FPSP exit (all work done!)	#
+#	_real_ovfl() - "callout" for Overflow exception enabled code	#
+#	_real_inex() - "callout" for Inexact exception enabled code	#
+#	_real_trace() - "callout" for Trace exception code		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Ovfl exception stack frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	Overflow Exception enabled:					#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#	Overflow Exception disabled:					#
+#	- The system stack is unchanged					#
+#	- The "exception present" flag in the fsave frame is cleared	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	On the 060, if an FP overflow is present as the result of any	#
+# instruction, the 060 will take an overflow exception whether the	#
+# exception is enabled or disabled in the FPCR. For the disabled case,	#
+# This handler emulates the instruction to determine what the correct	#
+# default result should be for the operation. This default result is	#
+# then stored in either the FP regfile, data regfile, or memory.	#
+# Finally, the handler exits through the "callout" _fpsp_done()		#
+# denoting that no exceptional conditions exist within the machine.	#
+#	If the exception is enabled, then this handler must create the	#
+# exceptional operand and place it in the fsave state frame, and store	#
+# the default result (only if the instruction is opclass 3). For	#
+# exceptions enabled, this handler must exit through the "callout"	#
+# _real_ovfl() so that the operating system's enabled overflow handler	#
+# can handle this case.							#
+#	Two other conditions exist. First, if overflow was disabled	#
+# but the inexact exception was enabled, this handler must exit		#
+# through the "callout" _real_inex() regardless of whether the result	#
+# was inexact.								#
+#	Also, in the case of an opclass three instruction where		#
+# overflow was disabled and the trace exception was enabled, this	#
+# handler must exit through the "callout" _real_trace().		#
+#									#
+#########################################################################
+
+	global		_fpsp_ovfl
+_fpsp_ovfl:
+
+#$#	sub.l		&24,%sp			# make room for src/dst
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&0x5,EXC_CMDREG(%a6)	# is instr an fmove out?
+	bne.w		fovfl_out
+
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+# since, I believe, only NORMs and DENORMs can come through here,
+# maybe we can avoid the subroutine call.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# maybe NORM,DENORM
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# that can pass through fpsp_ovfl(). remember that fcmp, ftst, and fsincos
+# will never take this exception.
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		fovfl_extract		# monadic
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fovfl_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+fovfl_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+fovfl_extract:
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+#$#	mov.l		FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
+#$#	mov.l		FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
+#$#	mov.l		FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	andi.l		&0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+# maybe we can make these entry points ONLY the OVFL entry points of each routine.
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+# the operation has been emulated. the result is in fp0.
+# the EXOP, if an exception occurred, is in fp1.
+# we must save the default result regardless of whether
+# traps are enabled or disabled.
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		store_fpreg
+
+# the exceptional possibilities we have left ourselves with are ONLY overflow
+# and inexact. and, the inexact is such that overflow occurred and was disabled
+# but inexact was enabled.
+	btst		&ovfl_bit,FPCR_ENABLE(%a6)
+	bne.b		fovfl_ovfl_on
+
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.b		fovfl_inex_on
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+	bra.l		_fpsp_done
+
+# overflow is enabled AND overflow, of course, occurred. so, we have the EXOP
+# in fp1. now, simply jump to _real_ovfl()!
+fovfl_ovfl_on:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP (fp1) to stack
+
+	mov.w		&0xe005,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_ovfl
+
+# overflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
+# we must jump to real_inex().
+fovfl_inex_on:
+
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP (fp1) to stack
+
+	mov.b		&0xc4,1+EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_inex
+
+########################################################################
+fovfl_out:
+
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+
+# the src operand is definitely a NORM(!), so tag it as such
+	mov.b		&NORM,STAG(%a6)		# set src optype tag
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	bsr.l		fout
+
+	btst		&ovfl_bit,FPCR_ENABLE(%a6)
+	bne.w		fovfl_ovfl_on
+
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.w		fovfl_inex_on
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	beq.l		_fpsp_done		# no
+
+	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
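+# (the frame's format/vector word packs the frame format in the high
+# nibble and the vector offset in the low twelve bits; $024 is the Trace
+# vector)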
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
+	bra.l		_real_trace
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_unfl(): 060FPSP entry point for FP Underflow exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Underflow exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_fpsp_done() - "callout" for 060FPSP exit (all work done!)	#
+#	_real_ovfl() - "callout" for Overflow exception enabled code	#
+#	_real_inex() - "callout" for Inexact exception enabled code	#
+#	_real_trace() - "callout" for Trace exception code		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Unfl exception stack frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	Underflow Exception enabled:					#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#	Underflow Exception disabled:					#
+#	- The system stack is unchanged					#
+#	- The "exception present" flag in the fsave frame is cleared	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	On the 060, if an FP underflow is present as the result of any	#
+# instruction, the 060 will take an underflow exception whether the	#
+# exception is enabled or disabled in the FPCR. For the disabled case,	#
+# this handler emulates the instruction to determine what the correct	#
+# default result should be for the operation. This default result is	#
+# then stored in either the FP regfile, data regfile, or memory.	#
+# Finally, the handler exits through the "callout" _fpsp_done()		#
+# denoting that no exceptional conditions exist within the machine.	#
+#	If the exception is enabled, then this handler must create the	#
+# exceptional operand and place it in the fsave state frame, and store	#
+# the default result (only if the instruction is opclass 3). For	#
+# exceptions enabled, this handler must exit through the "callout"	#
+# _real_unfl() so that the operating system enabled underflow handler	#
+# can handle this case.							#
+#	Two other conditions exist. First, if underflow was disabled	#
+# but the inexact exception was enabled and the result was inexact,	#
+# this handler must exit through the "callout" _real_inex().		#
+#	Also, in the case of an opclass three instruction where		#
+# underflow was disabled and the trace exception was enabled, this	#
+# handler must exit through the "callout" _real_trace().		#
+#									#
+#########################################################################
+
+	global		_fpsp_unfl
+_fpsp_unfl:
+
+#$#	sub.l		&24,%sp			# make room for src/dst
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&0x5,EXC_CMDREG(%a6)	# is instr an fmove out?
+	bne.w		funfl_out
+
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# maybe NORM,DENORM
+
+# bit five of the fp ext word separates the monadic and dyadic operations
+# that can pass through fpsp_unfl(). remember that fcmp and ftst
+# will never take this exception.
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is op monadic or dyadic?
+	beq.b		funfl_extract		# monadic
+
+# now, what's left that's not dyadic is fsincos. we can distinguish it
+# from all dyadics by its 0110xxx extension bit pattern (bit four set)
+	btst		&0x4,1+EXC_CMDREG(%a6)	# is op an fsincos?
+	bne.b		funfl_extract		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		funfl_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+funfl_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+funfl_extract:
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+#$#	mov.l		FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
+#$#	mov.l		FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
+#$#	mov.l		FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	andi.l		&0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+# maybe we can make these entry points ONLY the UNFL entry points of each routine.
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		store_fpreg
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception. Since this is incorrect, we need to check
+# if our emulation, after re-doing the operation, decided that
+# no underflow was called for. We do these checks only in
+# funfl_{unfl,inex}_on() because w/ both exceptions disabled, this
+# special case will simply exit gracefully with the correct result.
+
+# the exceptional possibilities we have left ourselves with are ONLY underflow
+# and inexact. and, the inexact is such that underflow occurred and was disabled
+# but inexact was enabled.
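+#
+#	if (unfl enabled) goto funfl_unfl_on;
+#	else if (inex2 enabled) goto funfl_inex_on;
+#	else exit through _fpsp_done();
+#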
+	btst		&unfl_bit,FPCR_ENABLE(%a6)
+	bne.b		funfl_unfl_on
+
+funfl_chkinex:
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.b		funfl_inex_on
+
+funfl_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+	bra.l		_fpsp_done
+
+# underflow is enabled AND underflow, of course, occurred. so, we have the EXOP
+# in fp1 (don't forget to save fp0). what to do now?
+# well, we simply have to go to _real_unfl()!
+funfl_unfl_on:
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception. Since this is incorrect, we check here to see
+# if our emulation, after re-doing the operation, decided that
+# no underflow was called for.
+	btst		&unfl_bit,FPSR_EXCEPT(%a6)
+	beq.w		funfl_chkinex
+
+funfl_unfl_on2:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP (fp1) to stack
+
+	mov.w		&0xe003,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_unfl
+
+# underflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
+# we must jump to real_inex().
+funfl_inex_on:
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception.
+# But, whether bogus or not, if inexact is enabled AND it occurred,
+# then we have to branch to real_inex.
+
+	btst		&inex2_bit,FPSR_EXCEPT(%a6)
+	beq.w		funfl_exit
+
+funfl_inex_on2:
+
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to stack
+
+	mov.b		&0xc4,1+EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)	# save exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# do this after fmovm,other f<op>s!
+
+	unlk		%a6
+
+	bra.l		_real_inex
+
+#######################################################################
+funfl_out:
+
+
+#$#	mov.l		FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$#	mov.l		FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$#	mov.l		FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+
+# the src operand is definitely a NORM(!), so tag it as such
+	mov.b		&NORM,STAG(%a6)		# set src optype tag
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	bsr.l		fout
+
+	btst		&unfl_bit,FPCR_ENABLE(%a6)
+	bne.w		funfl_unfl_on2
+
+	btst		&inex2_bit,FPCR_ENABLE(%a6)
+	bne.w		funfl_inex_on2
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+#$#	add.l		&24,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	beq.l		_fpsp_done		# no
+
+	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
+	bra.l		_real_trace
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_unsupp(): 060FPSP entry point for FP "Unimplemented	#
+#		        Data Type" exception.				#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Unimplemented Data Type exception in an operating system.	#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_{word,long}() - read instruction word/longword	#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	load_fpn1() - load src operand from FP regfile			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_real_inex() - "callout" to operating system inexact handler	#
+#	_fpsp_done() - "callout" for exit; work all done		#
+#	_real_trace() - "callout" for Trace enabled exception		#
+#	funimp_skew() - adjust fsave src ops to "incorrect" value	#
+#	_real_snan() - "callout" for SNAN exception			#
+#	_real_operr() - "callout" for OPERR exception			#
+#	_real_ovfl() - "callout" for OVFL exception			#
+#	_real_unfl() - "callout" for UNFL exception			#
+#	get_packed() - fetch packed operand from memory			#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the "Unimp Data Type" stk frame	#
+#	- The fsave frame contains the src op (for UNNORM/DENORM)	#
+#									#
+# OUTPUT **************************************************************	#
+#	If Inexact exception (opclass 3):				#
+#	- The system stack is changed to an Inexact exception stk frame	#
+#	If SNAN exception (opclass 3):					#
+#	- The system stack is changed to an SNAN exception stk frame	#
+#	If OPERR exception (opclass 3):					#
+#	- The system stack is changed to an OPERR exception stk frame	#
+#	If OVFL exception (opclass 3):					#
+#	- The system stack is changed to an OVFL exception stk frame	#
+#	If UNFL exception (opclass 3):					#
+#	- The system stack is changed to an UNFL exception stack frame	#
+#	If Trace exception enabled:					#
+#	- The system stack is changed to a Trace exception stack frame	#
+#	Else: (normal case)						#
+#	- Correct result has been stored as appropriate			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Two main instruction types can enter here: (1) DENORM or UNNORM	#
+# unimplemented data types. These can be either opclass 0,2 or 3	#
+# instructions, and (2) PACKED unimplemented data format instructions	#
+# also of opclasses 0,2, or 3.						#
+#	For UNNORM/DENORM opclass 0 and 2, the handler fetches the src	#
+# operand from the fsave state frame and the dst operand (if dyadic)	#
+# from the FP register file. The instruction is then emulated by	#
+# choosing an emulation routine from a table of routines indexed by	#
+# instruction type. Once the instruction has been emulated and result	#
+# saved, then we check to see if any enabled exceptions resulted from	#
+# instruction emulation. If none, then we exit through the "callout"	#
+# _fpsp_done(). If there is an enabled FP exception, then we insert	#
+# this exception into the FPU in the fsave state frame and then exit	#
+# through _fpsp_done().							#
+#	PACKED opclass 0 and 2 is similar in how the instruction is	#
+# emulated and exceptions handled. The differences occur in how the	#
+# handler loads the packed op (by calling get_packed() routine) and	#
+# by the fact that a Trace exception could be pending for PACKED ops.	#
+# If a Trace exception is pending, then the current exception stack	#
+# frame is changed to a Trace exception stack frame and an exit is	#
+# made through _real_trace().						#
+#	For UNNORM/DENORM opclass 3, the actual move out to memory is	#
+# performed by calling the routine fout(). If no exception should occur	#
+# as the result of emulation, then an exit either occurs through	#
+# _fpsp_done() or through _real_trace() if a Trace exception is pending	#
+# (a Trace stack frame must be created here, too). If an FP exception	#
+# should occur, then we must create an exception stack frame of that	#
+# type and jump to either _real_snan(), _real_operr(), _real_inex(),	#
+# _real_unfl(), or _real_ovfl() as appropriate. PACKED opclass 3	#
+# emulation is performed in a similar manner.				#
+#									#
+#########################################################################
+
+#
+# (1) DENORM and UNNORM (unimplemented) data types:
+#
+#				post-instruction
+#				*****************
+#				*      EA	*
+#	 pre-instruction	*		*
+#	*****************	*****************
+#	* 0x0 *  0x0dc  *	* 0x3 *  0x0dc  *
+#	*****************	*****************
+#	*     Next	*	*     Next	*
+#	*      PC	*	*      PC	*
+#	*****************	*****************
+#	*      SR	*	*      SR	*
+#	*****************	*****************
+#
+# (2) PACKED format (unsupported) opclasses two and three:
+#	*****************
+#	*      EA	*
+#	*		*
+#	*****************
+#	* 0x2 *  0x0dc	*
+#	*****************
+#	*     Next	*
+#	*      PC	*
+#	*****************
+#	*      SR	*
+#	*****************
+#
+	global		_fpsp_unsupp
+_fpsp_unsupp:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# save fp state
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode?
+	bne.b		fu_s
+fu_u:
+	mov.l		%usp,%a0		# fetch user stack pointer
+	mov.l		%a0,EXC_A7(%a6)		# save on stack
+	bra.b		fu_cont
+# if the exception is an opclass zero or two unimplemented data type
+# exception, then the a7' calculated here is wrong since it doesn't
+# stack an ea. however, we don't need an a7' for this case anyways.
+fu_s:
+	lea		0x4+EXC_EA(%a6),%a0	# load old a7'
+	mov.l		%a0,EXC_A7(%a6)		# save on stack
+
+fu_cont:
+
+# the FPIAR holds the "current PC" of the faulting instruction
+# the FPIAR should be set correctly for ALL exceptions passing through
+# this point.
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)	# store OPWORD and EXTWORD
+
+############################
+
+	clr.b		SPCOND_FLG(%a6)		# clear special condition flag
+
+# Separate opclass three (fpn-to-mem) ops since they have a different
+# stack frame and protocol.
+	btst		&0x5,EXC_CMDREG(%a6)	# is it an fmove out?
+	bne.w		fu_out			# yes
+
+# Separate packed opclass two instructions.
+	bfextu		EXC_CMDREG(%a6){&0:&6},%d0
+	cmpi.b		%d0,&0x13
+	beq.w		fu_in_pack
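+
+# (the {0:6} field above is opclass:source format; 0x13 = opclass 010
+# (two) with format 011, i.e. a packed operand)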
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field
+	andi.l		&0x00ff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+# Opclass two w/ memory-to-fpn operation will have an incorrect extended
+# precision format if the src format was single or double and the
+# source data type was an INF, NAN, DENORM, or UNNORM
+	lea		FP_SRC(%a6),%a0		# pass ptr to input
+	bsr.l		fix_skewed_ops
+
+# we don't know whether the src operand or the dst operand (or both) is the
+# UNNORM or DENORM. call the function that tags the operand type. if the
+# input is an UNNORM, then convert it to a NORM, DENORM, or ZERO.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2			# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+
+fu_op2:
+	mov.b		%d0,STAG(%a6)		# save src optype tag
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# at this point
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		fu_extract		# monadic
+	cmpi.b		1+EXC_CMDREG(%a6),&0x3a	# is operation an ftst?
+	beq.b		fu_extract		# yes, so it's monadic, too
+
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+fu_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+fu_extract:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	bfextu		1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: all dyadic ops
+#	OPERR	: fsqrt(-NORM)
+#	OVFL	: all except ftst,fcmp
+#	UNFL	: all except ftst,fcmp
+#	DZ	: fdiv
+#	INEX2	: all except ftst,fcmp
+#	INEX1	: none (packed doesn't go through here)
+#
+
+# we determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions set
+	bne.b		fu_in_ena		# some are enabled
+
+fu_in_cont:
+# fcmp and ftst do not store any result.
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch extension
+	andi.b		&0x38,%d0		# extract bits 3-5
+	cmpi.b		%d0,&0x38		# is instr fcmp or ftst?
+	beq.b		fu_in_exit		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		store_fpreg		# store the result
+
+fu_in_exit:
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	bra.l		_fpsp_done
+
+fu_in_ena:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		fu_in_exc		# there is at least one set
+
+#
+# No exceptions occurred that were also enabled. Now:
+#
+#	if (OVFL && ovfl_disabled && inexact_enabled) {
+#	    branch to _real_inex() (even if the result was exact!);
+#	} else {
+#	    save the result in the proper fp reg (unless the op is fcmp or ftst);
+#	    return;
+#	}
+#
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+	beq.b		fu_in_cont		# no
+
+fu_in_ovflchk:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+	beq.b		fu_in_cont		# no
+	bra.w		fu_in_exc_ovfl		# go insert overflow frame
+
+#
+# An exception occurred and that exception was enabled:
+#
+#	shift enabled exception field into lo byte of d0;
+#	if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
+#	    ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
+#		/*
+#		 * this is the case where we must call _real_inex() now or else
+#		 * there will be no other way to pass it the exceptional operand
+#		 */
+#		call _real_inex();
+#	} else {
+#		restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
+#	}
+#
+fu_in_exc:
+	subi.l		&24,%d0			# fix offset to be 0-7
+	cmpi.b		%d0,&0x6		# is exception INEX? (6)
+	bne.b		fu_in_exc_exit		# no
+
+# the enabled exception was inexact
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
+	bne.w		fu_in_exc_unfl		# yes
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
+	bne.w		fu_in_exc_ovfl		# yes
+
+# here, we insert the correct fsave status value into the fsave frame for the
+# corresponding exception. the operand in the fsave frame should be the original
+# src operand.
+fu_in_exc_exit:
+	mov.l		%d0,-(%sp)		# save d0
+	bsr.l		funimp_skew		# skew sgl or dbl inputs
+	mov.l		(%sp)+,%d0		# restore d0
+
+	mov.w		(tbl_except.b,%pc,%d0.w*2),2+FP_SRC(%a6) # create exc status
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore src op
+
+	unlk		%a6
+
+	bra.l		_fpsp_done
+
+tbl_except:
+	short		0xe000,0xe006,0xe004,0xe005
+	short		0xe003,0xe002,0xe001,0xe001
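+
+# (tbl_except is indexed by exception priority: BSUN, SNAN, OPERR, OVFL,
+# UNFL, DZ, INEX2, INEX1. the words match what the overflow and underflow
+# handlers above stuff directly: 0xe005 for OVFL, 0xe003 for UNFL,
+# 0xe001 for INEX)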
+
+fu_in_exc_unfl:
+	mov.w		&0x4,%d0
+	bra.b		fu_in_exc_exit
+fu_in_exc_ovfl:
+	mov.w		&0x03,%d0
+	bra.b		fu_in_exc_exit
+
+# If the input operand to this operation was opclass two and a single
+# or double precision denorm, inf, or nan, the operand needs to be
+# "corrected" in order to have the proper equivalent extended precision
+# number.
+	global		fix_skewed_ops
+fix_skewed_ops:
+	bfextu		EXC_CMDREG(%a6){&0:&6},%d0 # extract opclass,src fmt
+	cmpi.b		%d0,&0x11		# is class = 2 & fmt = sgl?
+	beq.b		fso_sgl			# yes
+	cmpi.b		%d0,&0x15		# is class = 2 & fmt = dbl?
+	beq.b		fso_dbl			# yes
+	rts					# no
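+
+# the "skew": the sgl or dbl biased exponent arrives re-based against the
+# extended bias ($3fff) with no format conversion. for singles (bias $7f),
+# denorm/zero thus shows up as $3fff - $7f = $3f80 and INF/NAN as
+# $3f80 + $ff = $407f; for doubles (bias $3ff), the checks below use
+# $3c00 and $3c00 + $7ff = $43ff.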
+
+fso_sgl:
+	mov.w		LOCAL_EX(%a0),%d0	# fetch src exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	cmpi.w		%d0,&0x3f80		# is |exp| == $3f80?
+	beq.b		fso_sgl_dnrm_zero	# yes
+	cmpi.w		%d0,&0x407f		# no; is |exp| == $407f?
+	beq.b		fso_infnan		# yes
+	rts					# no
+
+fso_sgl_dnrm_zero:
+	andi.l		&0x7fffffff,LOCAL_HI(%a0) # clear j-bit
+	beq.b		fso_zero		# it's a skewed zero
+fso_sgl_dnrm:
+# here, we count on norm not to alter a0...
+	bsr.l		norm			# normalize mantissa
+	neg.w		%d0			# -shft amt
+	addi.w		&0x3f81,%d0		# adjust new exponent
+	andi.w		&0x8000,LOCAL_EX(%a0)	# clear old exponent
+	or.w		%d0,LOCAL_EX(%a0)	# insert new exponent
+	rts
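+
+# (a sgl denorm normalized by a shift of k has true extended exponent
+# $3fff - $7e - k = $3f81 - k, which is what the neg/addi pair above
+# computes; fso_dbl_dnrm below uses $3c01 = $3fff - $3fe the same way)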
+
+fso_zero:
+	andi.w		&0x8000,LOCAL_EX(%a0)	# clear bogus exponent
+	rts
+
+fso_infnan:
+	andi.b		&0x7f,LOCAL_HI(%a0)	# clear j-bit
+	ori.w		&0x7fff,LOCAL_EX(%a0)	# make exponent = $7fff
+	rts
+
+fso_dbl:
+	mov.w		LOCAL_EX(%a0),%d0	# fetch src exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	cmpi.w		%d0,&0x3c00		# is |exp| == $3c00?
+	beq.b		fso_dbl_dnrm_zero	# yes
+	cmpi.w		%d0,&0x43ff		# no; is |exp| == $43ff?
+	beq.b		fso_infnan		# yes
+	rts					# no
+
+fso_dbl_dnrm_zero:
+	andi.l		&0x7fffffff,LOCAL_HI(%a0) # clear j-bit
+	bne.b		fso_dbl_dnrm		# it's a skewed denorm
+	tst.l		LOCAL_LO(%a0)		# is it a zero?
+	beq.b		fso_zero		# yes
+fso_dbl_dnrm:
+# here, we count on norm not to alter a0...
+	bsr.l		norm			# normalize mantissa
+	neg.w		%d0			# -shft amt
+	addi.w		&0x3c01,%d0		# adjust new exponent
+	andi.w		&0x8000,LOCAL_EX(%a0)	# clear old exponent
+	or.w		%d0,LOCAL_EX(%a0)	# insert new exponent
+	rts
+
+#################################################################
+
+# fmove out took an unimplemented data type exception.
+# the src operand is in FP_SRC. Call fout() to write out the result and
+# to determine which exceptions, if any, to take.
+fu_out:
+
+# Separate packed move outs from the UNNORM and DENORM move outs.
+	bfextu		EXC_CMDREG(%a6){&3:&3},%d0
+	cmpi.b		%d0,&0x3
+	beq.w		fu_out_pack
+	cmpi.b		%d0,&0x7
+	beq.w		fu_out_pack
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field.
+# fmove out doesn't affect ccodes.
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+# the src can ONLY be a DENORM or an UNNORM! so, don't make any big subroutine
+# call here. just figure out what it is...
+	mov.w		FP_SRC_EX(%a6),%d0	# get exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	beq.b		fu_out_denorm		# it's a DENORM
+
+	lea		FP_SRC(%a6),%a0
+	bsr.l		unnorm_fix		# yes; fix it
+
+	mov.b		%d0,STAG(%a6)
+
+	bra.b		fu_out_cont
+fu_out_denorm:
+	mov.b		&DENORM,STAG(%a6)
+fu_out_cont:
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	mov.l		(%a6),EXC_A6(%a6)	# in case a6 changes
+	bsr.l		fout			# call fmove out routine
+
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: none
+#	OPERR	: fmove.{b,w,l} out of large UNNORM
+#	OVFL	: fmove.{s,d}
+#	UNFL	: fmove.{s,d,x}
+#	DZ	: none
+#	INEX2	: all
+#	INEX1	: none (packed doesn't travel through here)
+
+# determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		fu_out_ena		# some are enabled
+
+fu_out_done:
+
+	mov.l		EXC_A6(%a6),(%a6)	# in case a6 changed
+
+# on extended precision opclass three instructions using pre-decrement or
+# post-increment addressing mode, the address register is not updated. if the
+# address register was the stack pointer used from user mode, then let's update
+# it here. if it was used from supervisor mode, then we have to handle this
+# as a special case.
+	btst		&0x5,EXC_SR(%a6)
+	bne.b		fu_out_done_s
+
+	mov.l		EXC_A7(%a6),%a0		# restore a7
+	mov.l		%a0,%usp
+
+fu_out_done_cont:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.b		fu_out_trace		# yes
+
+	bra.l		_fpsp_done
+
+# is the ea mode pre-decrement of the stack pointer from supervisor mode?
+# ("fmov.x fpm,-(a7)") if so,
+fu_out_done_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.b		fu_out_done_cont
+
+# the extended precision result is still in fp0. but, we need to save it
+# somewhere on the stack until we can copy it to its final resting place.
+# here, we're counting on the top of the stack to be the old place-holders
+# for fp0/fp1 which have already been restored. that way, we can write
+# over those destinations with the shifted stack frame.
+	fmovm.x		&0x80,FP_SRC(%a6)	# put answer on stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+	mov.l		LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	btst		&0x7,(%sp)
+	bne.b		fu_out_trace
+
+	bra.l		_fpsp_done
+
+fu_out_ena:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		fu_out_exc		# there is at least one set
+
+# no exceptions were set.
+# if a disabled overflow occurred and inexact was enabled but the result
+# was exact, then a branch to _real_inex() is made.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+	beq.w		fu_out_done		# no
+
+fu_out_ovflchk:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+	beq.w		fu_out_done		# no
+	bra.w		fu_inex			# yes
+
+#
+# The fp move out that took the "Unimplemented Data Type" exception was
+# being traced. Since the stack frames are similar, get the "current" PC
+# from FPIAR and put it in the trace stack frame then jump to _real_trace().
+#
+#		  UNSUPP FRAME		   TRACE FRAME
+#		*****************	*****************
+#		*      EA	*	*    Current	*
+#		*		*	*      PC	*
+#		*****************	*****************
+#		* 0x3 *  0x0dc	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*     Next	*	*     Next	*
+#		*      PC	*	*      PC	*
+#		*****************	*****************
+#		*      SR	*	*      SR	*
+#		*****************	*****************
+#
+fu_out_trace:
+	mov.w		&0x2024,0x6(%sp)
+	fmov.l		%fpiar,0x8(%sp)
+	bra.l		_real_trace
+
+# an exception occurred and that exception was enabled.
+fu_out_exc:
+	subi.l		&24,%d0			# fix offset to be 0-7
+
+# we don't mess with the existing fsave frame. just re-insert it and
+# jump to the "_real_{}()" handler...
+	mov.w		(tbl_fu_out.b,%pc,%d0.w*2),%d0
+	jmp		(tbl_fu_out.b,%pc,%d0.w*1)
+
+	swbeg		&0x8
+tbl_fu_out:
+	short		tbl_fu_out	- tbl_fu_out	# BSUN can't happen
+	short		tbl_fu_out	- tbl_fu_out	# SNAN can't happen
+	short		fu_operr	- tbl_fu_out	# OPERR
+	short		fu_ovfl		- tbl_fu_out	# OVFL
+	short		fu_unfl		- tbl_fu_out	# UNFL
+	short		tbl_fu_out	- tbl_fu_out	# DZ can't happen
+	short		fu_inex		- tbl_fu_out	# INEX2
+	short		tbl_fu_out	- tbl_fu_out	# INEX1 won't make it here
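+
+# (the entries are word offsets relative to tbl_fu_out, so the "can't
+# happen" slots simply point back at the table itself; swbeg is,
+# presumably, the assembler's switch-table marker)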
+
+# for snan,operr,ovfl,unfl, src op is still in FP_SRC so just
+# frestore it.
+fu_snan:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d8,EXC_VOFF(%a6)	# vector offset = 0xd8
+	mov.w		&0xe006,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+
+
+	bra.l		_real_snan
+
+fu_operr:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d0,EXC_VOFF(%a6)	# vector offset = 0xd0
+	mov.w		&0xe004,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+
+
+	bra.l		_real_operr
+
+fu_ovfl:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to the stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d4,EXC_VOFF(%a6)	# vector offset = 0xd4
+	mov.w		&0xe005,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)		# restore EXOP
+
+	unlk		%a6
+
+	bra.l		_real_ovfl
+
+# underflow can happen for extended precision. extended precision opclass
+# three instruction exceptions don't update the stack pointer. so, if the
+# exception occurred from user mode, then simply update a7 and exit normally.
+# if the exception occurred from supervisor mode, check if the <ea> mode
+# was -(a7); if so, the exception stack frame must be shifted before exiting.
+fu_unfl:
+	mov.l		EXC_A6(%a6),(%a6)	# restore a6
+
+	btst		&0x5,EXC_SR(%a6)
+	bne.w		fu_unfl_s
+
+	mov.l		EXC_A7(%a6),%a0		# restore a7 whether we need
+	mov.l		%a0,%usp		# to or not...
+
+fu_unfl_cont:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to the stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30cc,EXC_VOFF(%a6)	# vector offset = 0xcc
+	mov.w		&0xe003,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)		# restore EXOP
+
+	unlk		%a6
+
+	bra.l		_real_unfl
+
+fu_unfl_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # was the <ea> mode -(sp)?
+	bne.b		fu_unfl_cont
+
+# the extended precision result is still in fp0. but, we need to save it
+# somewhere on the stack until we can copy it to its final resting place
+# (where the exc frame is currently). make sure it's not at the top of the
+# frame or it will get overwritten when the exc stack frame is shifted "down".
+	fmovm.x		&0x80,FP_SRC(%a6)	# put answer on stack
+	fmovm.x		&0x40,FP_DST(%a6)	# put EXOP on stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30cc,EXC_VOFF(%a6)	# vector offset = 0xcc
+	mov.w		&0xe003,2+FP_DST(%a6)
+
+	frestore	FP_DST(%a6)		# restore EXOP
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+	mov.l		LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	bra.l		_real_unfl
+
+# fmove in and out enter here.
+fu_inex:
+	fmovm.x		&0x40,FP_SRC(%a6)	# save EXOP to the stack
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30c4,EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)
+
+	frestore	FP_SRC(%a6)		# restore EXOP
+
+	unlk		%a6
+
+
+	bra.l		_real_inex
+
+#########################################################################
+#########################################################################
+fu_in_pack:
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field
+	andi.l		&0x00ff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	bsr.l		get_packed		# fetch packed src operand
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src
+	bsr.l		set_tag_x		# set src optype tag
+
+	mov.b		%d0,STAG(%a6)		# save src optype tag
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# at this point
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		fu_extract_p		# monadic
+	cmpi.b		1+EXC_CMDREG(%a6),&0x3a	# is operation an ftst?
+	beq.b		fu_extract_p		# yes, so it's monadic, too
+
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2_done_p		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+fu_op2_done_p:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+fu_extract_p:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	bfextu		1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: all dyadic ops
+#	OPERR	: fsqrt(-NORM)
+#	OVFL	: all except ftst,fcmp
+#	UNFL	: all except ftst,fcmp
+#	DZ	: fdiv
+#	INEX2	: all except ftst,fcmp
+#	INEX1	: all
+#
+
+# we determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		fu_in_ena_p		# some are enabled
+
+fu_in_cont_p:
+# fcmp and ftst do not store any result.
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch extension
+	andi.b		&0x38,%d0		# extract bits 3-5
+	cmpi.b		%d0,&0x38		# is instr fcmp or ftst?
+	beq.b		fu_in_exit_p		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		store_fpreg		# store the result
+
+fu_in_exit_p:
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.w		fu_in_exit_s_p		# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# update user a7
+	mov.l		%a0,%usp
+
+fu_in_exit_cont_p:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel stack frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+# the exception occurred in supervisor mode. check to see if the
+# addressing mode was (a7)+. if so, we'll need to shift the
+# stack frame "up".
+fu_in_exit_s_p:
+	btst		&mia7_bit,SPCOND_FLG(%a6) # was ea mode (a7)+
+	beq.b		fu_in_exit_cont_p	# no
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel stack frame
+
+# shift the stack frame "up". we don't really care about the <ea> field.
+	mov.l		0x4(%sp),0x10(%sp)
+	mov.l		0x0(%sp),0xc(%sp)
+	add.l		&0xc,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+fu_in_ena_p:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled & set
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		fu_in_exc_p		# at least one was set
+
+#
+# No exceptions occurred that were also enabled. Now:
+#
+#	if (OVFL && ovfl_disabled && inexact_enabled) {
+#	    branch to _real_inex() (even if the result was exact!);
+#	} else {
+#	    save the result in the proper fp reg (unless the op is fcmp or ftst);
+#	    return;
+#	}
+#
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+	beq.w		fu_in_cont_p		# no
+
+fu_in_ovflchk_p:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+	beq.w		fu_in_cont_p		# no
+	bra.w		fu_in_exc_ovfl_p	# do _real_inex() now
+
+#
+# An exception occurred and that exception was enabled:
+#
+#	shift enabled exception field into lo byte of d0;
+#	if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
+#	    ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
+#		/*
+#		 * this is the case where we must call _real_inex() now or else
+#		 * there will be no other way to pass it the exceptional operand
+#		 */
+#		call _real_inex();
+#	} else {
+#		restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
+#	}
+#
+fu_in_exc_p:
+	subi.l		&24,%d0			# fix offset to be 0-7
+	cmpi.b		%d0,&0x6		# is exception INEX? (6 or 7)
+	blt.b		fu_in_exc_exit_p	# no
+
+# the enabled exception was inexact
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
+	bne.w		fu_in_exc_unfl_p	# yes
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
+	bne.w		fu_in_exc_ovfl_p	# yes
+
+# here, we insert the correct fsave status value into the fsave frame for the
+# corresponding exception. the operand in the fsave frame should be the original
+# src operand.
+# as a reminder for future predicted pain and agony, we are passing in fsave the
+# "non-skewed" operand for cases of sgl and dbl src INFs, NANs, and DENORMs.
+# this is INCORRECT for an enabled SNAN, which would give the user the skewed SNAN!!!
+fu_in_exc_exit_p:
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.w		fu_in_exc_exit_s_p	# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# update user a7
+	mov.l		%a0,%usp
+
+fu_in_exc_exit_cont_p:
+	mov.w		(tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore src op
+
+	unlk		%a6
+
+	btst		&0x7,(%sp)		# is trace enabled?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done
+
+tbl_except_p:
+	short		0xe000,0xe006,0xe004,0xe005
+	short		0xe003,0xe002,0xe001,0xe001
+
+fu_in_exc_ovfl_p:
+	mov.w		&0x3,%d0
+	bra.w		fu_in_exc_exit_p
+
+fu_in_exc_unfl_p:
+	mov.w		&0x4,%d0
+	bra.w		fu_in_exc_exit_p
+
+fu_in_exc_exit_s_p:
+	btst		&mia7_bit,SPCOND_FLG(%a6)
+	beq.b		fu_in_exc_exit_cont_p
+
+	mov.w		(tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore src op
+
+	unlk		%a6			# unravel stack frame
+
+# shift stack frame "up". who cares about <ea> field.
+	mov.l		0x4(%sp),0x10(%sp)
+	mov.l		0x0(%sp),0xc(%sp)
+	add.l		&0xc,%sp
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.b		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+#
+# The opclass two PACKED instruction that took an "Unimplemented Data Type"
+# exception was being traced. Make the "current" PC the FPIAR and put it in the
+# trace stack frame then jump to _real_trace().
+#
+#		  UNSUPP FRAME		   TRACE FRAME
+#		*****************	*****************
+#		*      EA	*	*    Current	*
+#		*		*	*      PC	*
+#		*****************	*****************
+#		* 0x2 *	0x0dc	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*     Next	*	*     Next	*
+#		*      PC	*	*      PC	*
+#		*****************	*****************
+#		*      SR	*	*      SR	*
+#		*****************	*****************
+fu_trace_p:
+	mov.w		&0x2024,0x6(%sp)
+	fmov.l		%fpiar,0x8(%sp)
+
+	bra.l		_real_trace
+
+#########################################################
+#########################################################
+fu_out_pack:
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field.
+# fmove out doesn't affect ccodes.
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		load_fpn1
+
+# unlike other opclass 3 unimplemented data type exceptions, packed must be
+# able to detect all operand types.
+	lea		FP_SRC(%a6),%a0
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		fu_op2_p		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+
+fu_op2_p:
+	mov.b		%d0,STAG(%a6)		# save src optype tag
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# fetch rnd mode/prec
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	mov.l		(%a6),EXC_A6(%a6)	# in case a6 changes
+	bsr.l		fout			# call fmove out routine
+
+# Exceptions in order of precedence:
+#	BSUN	: no
+#	SNAN	: yes
+#	OPERR	: if ((k_factor > +17) || (dec. exp exceeds 3 digits))
+#	OVFL	: no
+#	UNFL	: no
+#	DZ	: no
+#	INEX2	: yes
+#	INEX1	: no
+
+# determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.w		fu_out_ena_p		# some are enabled
+
+fu_out_exit_p:
+	mov.l		EXC_A6(%a6),(%a6)	# restore a6
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.b		fu_out_exit_s_p		# supervisor
+
+	mov.l		EXC_A7(%a6),%a0		# update user a7
+	mov.l		%a0,%usp
+
+fu_out_exit_cont_p:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel stack frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		fu_trace_p		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+# the exception occurred in supervisor mode. check to see if the
+# addressing mode was -(a7). if so, we'll need to shift the
+# stack frame "down".
+fu_out_exit_s_p:
+	btst		&mda7_bit,SPCOND_FLG(%a6) # was ea mode -(a7)
+	beq.b		fu_out_exit_cont_p	# no
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	btst		&0x7,(%sp)
+	bne.w		fu_trace_p
+
+	bra.l		_fpsp_done
+
+fu_out_ena_p:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	beq.w		fu_out_exit_p
+
+	mov.l		EXC_A6(%a6),(%a6)	# restore a6
+
+# an exception occurred and that exception was enabled.
+# the only exceptions possible on a packed move out are INEX, OPERR, and SNAN.
+fu_out_exc_p:
+	cmpi.b		%d0,&0x1a
+	bgt.w		fu_inex_p2
+	beq.w		fu_operr_p
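+
+# (after the bfffo in fu_out_ena_p, d0 is the bit offset 24-31 in priority
+# order: BSUN, SNAN, OPERR, OVFL, UNFL, DZ, INEX2, INEX1. 0x1a = 26 =
+# OPERR; anything greater can only be an INEX bit here, and the only case
+# that falls through is SNAN)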
+
+fu_snan_p:
+	btst		&0x5,EXC_SR(%a6)
+	bne.b		fu_snan_s_p
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	bra.w		fu_snan
+
+fu_snan_s_p:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.w		fu_snan
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d8,EXC_VOFF(%a6)	# vector offset = 0xd8
+	mov.w		&0xe006,2+FP_SRC(%a6)	# set fsave status
+
+	frestore	FP_SRC(%a6)		# restore src operand
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+
+	bra.l		_real_snan
+
+fu_operr_p:
+	btst		&0x5,EXC_SR(%a6)
+	bne.w		fu_operr_p_s
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	bra.w		fu_operr
+
+fu_operr_p_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.w		fu_operr
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30d0,EXC_VOFF(%a6)	# vector offset = 0xd0
+	mov.w		&0xe004,2+FP_SRC(%a6)	# set fsave status
+
+	frestore	FP_SRC(%a6)		# restore src operand
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+
+	bra.l		_real_operr
+
+fu_inex_p2:
+	btst		&0x5,EXC_SR(%a6)
+	bne.w		fu_inex_s_p2
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp
+	bra.w		fu_inex
+
+fu_inex_s_p2:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	bne.w		fu_inex
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0/fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.w		&0x30c4,EXC_VOFF(%a6)	# vector offset = 0xc4
+	mov.w		&0xe001,2+FP_SRC(%a6)	# set fsave status
+
+	frestore	FP_SRC(%a6)		# restore src operand
+
+	mov.l		(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+	mov.l		LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+	mov.l		LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+
+	bra.l		_real_inex
+
+#########################################################################
+
+#
+# if we're stuffing a source operand back into an fsave frame then we
+# have to make sure that for single or double source operands that the
+# format stuffed is as weird as the hardware usually makes it.
+#
+	global		funimp_skew
+funimp_skew:
+	bfextu		EXC_EXTWORD(%a6){&3:&3},%d0 # extract src specifier
+	cmpi.b		%d0,&0x1		# was src sgl?
+	beq.b		funimp_skew_sgl		# yes
+	cmpi.b		%d0,&0x5		# was src dbl?
+	beq.b		funimp_skew_dbl		# yes
+	rts
+
+funimp_skew_sgl:
+	mov.w		FP_SRC_EX(%a6),%d0	# fetch DENORM exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	beq.b		funimp_skew_sgl_not
+	cmpi.w		%d0,&0x3f80
+	bgt.b		funimp_skew_sgl_not
+	neg.w		%d0			# make exponent negative
+	addi.w		&0x3f81,%d0		# find amt to shift
+	mov.l		FP_SRC_HI(%a6),%d1	# fetch DENORM hi(man)
+	lsr.l		%d0,%d1			# shift it
+	bset		&31,%d1			# set j-bit
+	mov.l		%d1,FP_SRC_HI(%a6)	# insert new hi(man)
+	andi.w		&0x8000,FP_SRC_EX(%a6)	# clear old exponent
+	ori.w		&0x3f80,FP_SRC_EX(%a6)	# insert new "skewed" exponent
+funimp_skew_sgl_not:
+	rts
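+
+# (the inverse of fix_skewed_ops: a source that would be a denorm in sgl
+# precision -- true exponent at or below $3f80 -- gets its mantissa
+# shifted back right and is left in the fsave frame with the skewed
+# exponent $3f80 and the j-bit set, the way the hardware presents it)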
+
+funimp_skew_dbl:
+	mov.w		FP_SRC_EX(%a6),%d0	# fetch DENORM exponent
+	andi.w		&0x7fff,%d0		# strip sign
+	beq.b		funimp_skew_dbl_not
+	cmpi.w		%d0,&0x3c00
+	bgt.b		funimp_skew_dbl_not
+
+	tst.b		FP_SRC_EX(%a6)		# make "internal format"
+	smi.b		0x2+FP_SRC(%a6)
+	mov.w		%d0,FP_SRC_EX(%a6)	# insert exponent with cleared sign
+	clr.l		%d0			# clear g,r,s
+	lea		FP_SRC(%a6),%a0		# pass ptr to src op
+	mov.w		&0x3c01,%d1		# pass denorm threshold
+	bsr.l		dnrm_lp			# denorm it
+	mov.w		&0x3c00,%d0		# new exponent
+	tst.b		0x2+FP_SRC(%a6)		# is sign set?
+	beq.b		fss_dbl_denorm_done	# no
+	bset		&15,%d0			# set sign
+fss_dbl_denorm_done:
+	bset		&0x7,FP_SRC_HI(%a6)	# set j-bit
+	mov.w		%d0,FP_SRC_EX(%a6)	# insert new exponent
+funimp_skew_dbl_not:
+	rts
+
+#########################################################################
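+# a write "callout" for the supervisor -(a7) special case: from user mode
+# we simply branch into _dmem_write as usual; from supervisor mode the
+# 12-byte result is parked in FP_DST so the caller can splice it into the
+# exception stack frame itself. the clr.l of d1 presumably reports "no
+# access error", matching the _dmem_write return convention.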
+	global		_mem_write2
+_mem_write2:
+	btst		&0x5,EXC_SR(%a6)
+	beq.l		_dmem_write
+	mov.l		0x0(%a0),FP_DST_EX(%a6)
+	mov.l		0x4(%a0),FP_DST_HI(%a6)
+	mov.l		0x8(%a0),FP_DST_LO(%a6)
+	clr.l		%d1
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_effadd(): 060FPSP entry point for FP "Unimplemented	#
+#			effective address" exception.			#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Unimplemented Effective Address exception in an operating	#
+#	system.								#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	decbin() - convert packed data to FP binary data		#
+#	_real_fpu_disabled() - "callout" for "FPU disabled" exception	#
+#	_real_access() - "callout" for access error exception		#
+#	_mem_read() - read extended immediate operand from memory	#
+#	_fpsp_done() - "callout" for exit; work all done		#
+#	_real_trace() - "callout" for Trace enabled exception		#
+#	fmovm_dynamic() - emulate dynamic fmovm instruction		#
+#	fmovm_ctrl() - emulate fmovm control instruction		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the "Unimplemented <ea>" stk frame	#
+#									#
+# OUTPUT **************************************************************	#
+#	If access error:						#
+#	- The system stack is changed to an access error stack frame	#
+#	If FPU disabled:						#
+#	- The system stack is changed to an FPU disabled stack frame	#
+#	If Trace exception enabled:					#
+#	- The system stack is changed to a Trace exception stack frame	#
+#	Else: (normal case)						#
+#	- None (correct result has been stored as appropriate)		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This exception handles 3 types of operations:			#
+# (1) FP Instructions using extended precision or packed immediate	#
+#     addressing mode.							#
+# (2) The "fmovm.x" instruction w/ dynamic register specification.	#
+# (3) The "fmovm.l" instruction w/ 2 or 3 control registers.		#
+#									#
+#	For immediate data operations, the data is read in w/ a		#
+# _mem_read() "callout", converted to FP binary (if packed), and used	#
+# as the source operand to the instruction specified by the instruction	#
+# word. If no FP exception should be reported as a result of the	#
+# emulation, then the result is stored to the destination register and	#
+# the handler exits through _fpsp_done(). If an enabled exc has been	#
+# signalled as a result of emulation, then an fsave state frame		#
+# corresponding to the FP exception type must be entered into the 060	#
+# FPU before exiting. In either the enabled or disabled cases, we	#
+# must also check if a Trace exception is pending, in which case, we	#
+# must create a Trace exception stack frame from the current exception	#
+# stack frame. If no Trace is pending, we simply exit through		#
+# _fpsp_done().								#
+#	For "fmovm.x", call the routine fmovm_dynamic() which will	#
+# decode and emulate the instruction. No FP exceptions can be pending	#
+# as a result of this operation emulation. A Trace exception can be	#
+# pending, though, which means the current stack frame must be changed	#
+# to a Trace stack frame and an exit made through _real_trace().	#
+# For the case of "fmovm.x Dn,-(a7)", where the offending instruction	#
+# was executed from supervisor mode, this handler must store the FP	#
+# register file values to the system stack by itself since		#
+# fmovm_dynamic() can't handle this. A normal exit is made through	#
+# _fpsp_done().								#
+#	For "fmovm.l", fmovm_ctrl() is used to emulate the instruction.	#
+# Again, a Trace exception may be pending and an exit made through	#
+# _real_trace(). Else, a normal exit is made through _fpsp_done().	#
+#									#
+#	Before any of the above is attempted, it must be checked to	#
+# see if the FPU is disabled. Since the "Unimp <ea>" exception is taken	#
+# before the "FPU disabled" exception, but the "FPU disabled" exception	#
+# has higher priority, we check the disabled bit in the PCR. If set,	#
+# then we must create an 8 word "FPU disabled" exception stack frame	#
+# from the current 4 word exception stack frame. This includes		#
+# reproducing the effective address of the instruction to put on the	#
+# new stack frame.							#
+#									#
+#	In the process of all emulation work, if a _mem_read()		#
+# "callout" returns a failing result indicating an access error, then	#
+# we must create an access error stack frame from the current stack	#
+# frame. This information includes a faulting address and a fault-	#
+# status-longword. These are created within this handler.		#
+#									#
+#########################################################################
+
+	global		_fpsp_effadd
+_fpsp_effadd:
+
+# This exception type takes priority over the "Line F Emulator"
+# exception. Therefore, the FPU could be disabled when entering here.
+# So, we must check to see if it's disabled and handle that case separately.
+	mov.l		%d0,-(%sp)		# save d0
+	movc		%pcr,%d0		# load proc cr
+	btst		&0x1,%d0		# is FPU disabled?
+	bne.w		iea_disabled		# yes
+	mov.l		(%sp)+,%d0		# restore d0
+
+	link		%a6,&-LOCAL_SIZE	# init stack frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# PC of instruction that took the exception is the PC in the frame
+	mov.l		EXC_PC(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)	# store OPWORD and EXTWORD
+
+#########################################################################
+
+	tst.w		%d0			# is operation fmovem?
+	bmi.w		iea_fmovm		# yes
+
+#
+# here, we will have:
+#	fabs	fdabs	fsabs		facos		fmod
+#	fadd	fdadd	fsadd		fasin		frem
+#	fcmp				fatan		fscale
+#	fdiv	fddiv	fsdiv		fatanh		fsin
+#	fint				fcos		fsincos
+#	fintrz				fcosh		fsinh
+#	fmove	fdmove	fsmove		fetox		ftan
+#	fmul	fdmul	fsmul		fetoxm1		ftanh
+#	fneg	fdneg	fsneg		fgetexp		ftentox
+#	fsgldiv				fgetman		ftwotox
+#	fsglmul				flog10
+#	fsqrt				flog2
+#	fsub	fdsub	fssub		flogn
+#	ftst				flognp1
+# which can all use f<op>.{x,p}
+# so, now it's immediate data extended precision AND PACKED FORMAT!
+#
+iea_op:
+	andi.l		&0x00ff00ff,USER_FPSR(%a6)
+
+	btst		&0xa,%d0		# is src fmt x or p?
+	bne.b		iea_op_pack		# packed
+
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# pass: ptr to #<data>
+	lea		FP_SRC(%a6),%a1		# pass: ptr to super addr
+	mov.l		&0xc,%d0		# pass: 12 bytes
+	bsr.l		_imem_read		# read extended immediate
+
+	tst.l		%d1			# did ifetch fail?
+	bne.w		iea_iacc		# yes
+
+	bra.b		iea_op_setsrc
+
+iea_op_pack:
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# pass: ptr to #<data>
+	lea		FP_SRC(%a6),%a1		# pass: ptr to super dst
+	mov.l		&0xc,%d0		# pass: 12 bytes
+	bsr.l		_imem_read		# read packed operand
+
+	tst.l		%d1			# did ifetch fail?
+	bne.w		iea_iacc		# yes
+
+# The packed operand is an INF or a NAN if the exponent field is all ones.
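+# (packed-decimal layout, for reference: the first longword holds SM in
+# bit 31, SE in bit 30, the two Y bits in bits 29-28, the three BCD
+# exponent digits in bits 27-16, and the BCD integer digit in bits 3-0;
+# the next two longwords hold the 16 BCD fraction digits. so the bfextu
+# below grabs SE, the Y bits, and the exponent digits, which are all
+# ones for an INF or NAN.)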
+	bfextu		FP_SRC(%a6){&1:&15},%d0	# get exp
+	cmpi.w		%d0,&0x7fff		# INF or NAN?
+	beq.b		iea_op_setsrc		# operand is an INF or NAN
+
+# The packed operand is a zero if the mantissa is all zero, else it's
+# a normal packed op.
+	mov.b		3+FP_SRC(%a6),%d0	# get byte 4
+	andi.b		&0x0f,%d0		# clear all but last nybble
+	bne.b		iea_op_gp_not_spec	# not a zero
+	tst.l		FP_SRC_HI(%a6)		# is lw 2 zero?
+	bne.b		iea_op_gp_not_spec	# not a zero
+	tst.l		FP_SRC_LO(%a6)		# is lw 3 zero?
+	beq.b		iea_op_setsrc		# operand is a ZERO
+iea_op_gp_not_spec:
+	lea		FP_SRC(%a6),%a0		# pass: ptr to packed op
+	bsr.l		decbin			# convert to extended
+	fmovm.x		&0x80,FP_SRC(%a6)	# make this the srcop
+
+iea_op_setsrc:
+	addi.l		&0xc,EXC_EXTWPTR(%a6)	# update extension word pointer
+
+# FP_SRC now holds the src operand.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# could be ANYTHING!!!
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		iea_op_getdst		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM/DENORM/ZERO
+	mov.b		%d0,STAG(%a6)		# set new optype tag
+iea_op_getdst:
+	clr.b		STORE_FLG(%a6)		# clear "store result" boolean
+
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		iea_op_extract		# monadic
+	btst		&0x4,1+EXC_CMDREG(%a6)	# is operation fsincos,ftst,fcmp?
+	bne.b		iea_op_spec		# yes
+
+iea_op_loaddst:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
+	bsr.l		load_fpn2		# load dst operand
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,DTAG(%a6)		# could be ANYTHING!!!
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		iea_op_extract		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM/DENORM/ZERO
+	mov.b		%d0,DTAG(%a6)		# set new optype tag
+	bra.b		iea_op_extract
+
+# the operation is fsincos, ftst, or fcmp. only fcmp is dyadic
+iea_op_spec:
+	btst		&0x3,1+EXC_CMDREG(%a6)	# is operation fsincos?
+	beq.b		iea_op_extract		# yes
+# now, we're left with ftst and fcmp. so, first let's tag them so that they don't
+# store a result. then, only fcmp will branch back and pick up a dst operand.
+	st		STORE_FLG(%a6)		# don't store a final result
+	btst		&0x1,1+EXC_CMDREG(%a6)	# is operation fcmp?
+	beq.b		iea_op_loaddst		# yes
+
+iea_op_extract:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass: rnd mode,prec
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	fmov.l		&0x0,%fpcr
+	fmov.l		&0x0,%fpsr
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
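+# tbl_unsupp holds offsets ("routine - tbl_unsupp") rather than absolute
+# addresses, so the dispatch is two pc-relative accesses: the mov.l
+# fetches the offset for extension %d1, and the jsr adds it back to the
+# table base. e.g. extension 0x22 (fadd) fetches "fadd - tbl_unsupp"
+# from entry 0x22 and lands on fadd.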
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+#	BSUN	: none
+#	SNAN	: all operations
+#	OPERR	: all reg-reg or mem-reg operations that can normally operr
+#	OVFL	: same as OPERR
+#	UNFL	: same as OPERR
+#	DZ	: same as OPERR
+#	INEX2	: same as OPERR
+#	INEX1	: all packed immediate operations
+#
+
+# we determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0	# fetch exceptions enabled
+	bne.b		iea_op_ena		# some are enabled
+
+# now, we save the result, unless, of course, the operation was ftst or fcmp.
+# these don't save results.
+iea_op_save:
+	tst.b		STORE_FLG(%a6)		# does this op store a result?
+	bne.b		iea_op_exit1		# exit with no frestore
+
+iea_op_store:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
+	bsr.l		store_fpreg		# store the result
+
+iea_op_exit1:
+	mov.l		EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
+	mov.l		EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6			# unravel the frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.w		iea_op_trace		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
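+# (worked example: with only OVFL enabled in the FPCR and OVFL+INEX2 set
+# in the FPSR, the and.b below leaves just bit 4 set; bfffo over bits
+# 24-31 returns 27, the "subi.l &24" in iea_op_exc yields offset 3, and
+# tbl_iea_except entry 3 is 0xe005 -- the overflow frame type.)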
+iea_op_ena:
+	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled and set
+	bfffo		%d0{&24:&8},%d0		# find highest priority exception
+	bne.b		iea_op_exc		# at least one was set
+
+# no exception occurred. now, did a disabled, exact overflow occur with inexact
+# enabled? if so, then we have to stuff an overflow frame into the FPU.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+	beq.b		iea_op_save
+
+iea_op_ovfl:
+	btst		&inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
+	beq.b		iea_op_store		# no
+	bra.b		iea_op_exc_ovfl		# yes
+
+# an enabled exception occurred. we have to insert the exception type back into
+# the machine.
+iea_op_exc:
+	subi.l		&24,%d0			# fix offset to be 0-7
+	cmpi.b		%d0,&0x6		# is exception INEX?
+	bne.b		iea_op_exc_force	# no
+
+# the enabled exception was inexact. so, if it occurs with an overflow
+# or underflow that was disabled, then we have to force an overflow or
+# underflow frame.
+	btst		&ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+	bne.b		iea_op_exc_ovfl		# yes
+	btst		&unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
+	bne.b		iea_op_exc_unfl		# yes
+
+iea_op_exc_force:
+	mov.w		(tbl_iea_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+	bra.b		iea_op_exit2		# exit with frestore
+
+tbl_iea_except:
+	short		0xe002, 0xe006, 0xe004, 0xe005
+	short		0xe003, 0xe002, 0xe001, 0xe001
+
+iea_op_exc_ovfl:
+	mov.w		&0xe005,2+FP_SRC(%a6)
+	bra.b		iea_op_exit2
+
+iea_op_exc_unfl:
+	mov.w		&0xe003,2+FP_SRC(%a6)
+
+iea_op_exit2:
+	mov.l		EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
+	mov.l		EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)		# restore exceptional state
+
+	unlk		%a6			# unravel the frame
+
+	btst		&0x7,(%sp)		# is trace on?
+	bne.b		iea_op_trace		# yes
+
+	bra.l		_fpsp_done		# exit to os
+
+#
+# The opclass two instruction that took an "Unimplemented Effective Address"
+# exception was being traced. Make the "current" PC the FPIAR and put it in
+# the trace stack frame then jump to _real_trace().
+#
+#		 UNIMP EA FRAME		   TRACE FRAME
+#		*****************	*****************
+#		* 0x0 *  0x0f0	*	*    Current	*
+#		*****************	*      PC	*
+#		*    Current	*	*****************
+#		*      PC	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*      SR	*	*     Next	*
+#		*****************	*      PC	*
+#					*****************
+#					*      SR	*
+#					*****************
+iea_op_trace:
+	mov.l		(%sp),-(%sp)		# shift stack frame "down"
+	mov.w		0x8(%sp),0x4(%sp)
+	mov.w		&0x2024,0x6(%sp)	# stk fmt = 0x2; voff = 0x024
+	fmov.l		%fpiar,0x8(%sp)		# "Current PC" is in FPIAR
+
+	bra.l		_real_trace
+
+#########################################################################
+iea_fmovm:
+	btst		&14,%d0			# ctrl or data reg
+	beq.w		iea_fmovm_ctrl
+
+iea_fmovm_data:
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode
+	bne.b		iea_fmovm_data_s
+
+iea_fmovm_data_u:
+	mov.l		%usp,%a0
+	mov.l		%a0,EXC_A7(%a6)		# store current a7
+	bsr.l		fmovm_dynamic		# do dynamic fmovm
+	mov.l		EXC_A7(%a6),%a0		# load possibly new a7
+	mov.l		%a0,%usp		# update usp
+	bra.w		iea_fmovm_exit
+
+iea_fmovm_data_s:
+	clr.b		SPCOND_FLG(%a6)
+	lea		0x2+EXC_VOFF(%a6),%a0
+	mov.l		%a0,EXC_A7(%a6)
+	bsr.l		fmovm_dynamic		# do dynamic fmovm
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	beq.w		iea_fmovm_data_predec
+	cmpi.b		SPCOND_FLG(%a6),&mia7_flg
+	bne.w		iea_fmovm_exit
+
+# right now, d0 = the size.
+# the data has been fetched from the supervisor stack, but we have not
+# incremented the stack pointer by the appropriate number of bytes.
+# do it here.
+iea_fmovm_data_postinc:
+	btst		&0x7,EXC_SR(%a6)
+	bne.b		iea_fmovm_data_pi_trace
+
+	mov.w		EXC_SR(%a6),(EXC_SR,%a6,%d0)
+	mov.l		EXC_EXTWPTR(%a6),(EXC_PC,%a6,%d0)
+	mov.w		&0x00f0,(EXC_VOFF,%a6,%d0)
+
+	lea		(EXC_SR,%a6,%d0),%a0
+	mov.l		%a0,EXC_SR(%a6)
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+	mov.l		(%sp)+,%sp
+	bra.l		_fpsp_done
+
+iea_fmovm_data_pi_trace:
+	mov.w		EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
+	mov.l		EXC_EXTWPTR(%a6),(EXC_PC-0x4,%a6,%d0)
+	mov.w		&0x2024,(EXC_VOFF-0x4,%a6,%d0)
+	mov.l		EXC_PC(%a6),(EXC_VOFF+0x2-0x4,%a6,%d0)
+
+	lea		(EXC_SR-0x4,%a6,%d0),%a0
+	mov.l		%a0,EXC_SR(%a6)
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+	mov.l		(%sp)+,%sp
+	bra.l		_real_trace
+
+# right now, d0 = the size and d1 = the strg.
+iea_fmovm_data_predec:
+	mov.b		%d1,EXC_VOFF(%a6)	# store strg
+	mov.b		%d0,0x1+EXC_VOFF(%a6)	# store size
+
+	fmovm.x		EXC_FP0(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	mov.l		(%a6),-(%sp)		# make a copy of a6
+	mov.l		%d0,-(%sp)		# save d0
+	mov.l		%d1,-(%sp)		# save d1
+	mov.l		EXC_EXTWPTR(%a6),-(%sp)	# make a copy of Next PC
+
+	clr.l		%d0
+	mov.b		0x1+EXC_VOFF(%a6),%d0	# fetch size
+	neg.l		%d0			# get negative of size
+
+	btst		&0x7,EXC_SR(%a6)	# is trace enabled?
+	beq.b		iea_fmovm_data_p2
+
+	mov.w		EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
+	mov.l		EXC_PC(%a6),(EXC_VOFF-0x2,%a6,%d0)
+	mov.l		(%sp)+,(EXC_PC-0x4,%a6,%d0)
+	mov.w		&0x2024,(EXC_VOFF-0x4,%a6,%d0)
+
+	pea		(%a6,%d0)		# create final sp
+	bra.b		iea_fmovm_data_p3
+
+iea_fmovm_data_p2:
+	mov.w		EXC_SR(%a6),(EXC_SR,%a6,%d0)
+	mov.l		(%sp)+,(EXC_PC,%a6,%d0)
+	mov.w		&0x00f0,(EXC_VOFF,%a6,%d0)
+
+	pea		(0x4,%a6,%d0)		# create final sp
+
+iea_fmovm_data_p3:
+	clr.l		%d1
+	mov.b		EXC_VOFF(%a6),%d1	# fetch strg
+
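+# the strg byte uses the static fmovm encoding (bit 7 = fp0 down to
+# bit 0 = fp7). the unrolled chain below shifts the mask up one bit at
+# a time and stores 12 bytes per set bit; e.g. strg = 0xc0 stores fp0
+# then fp1, bumping d0 by 12 between stores.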
+	tst.b		%d1
+	bpl.b		fm_1
+	fmovm.x		&0x80,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_1:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_2
+	fmovm.x		&0x40,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_2:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_3
+	fmovm.x		&0x20,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_3:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_4
+	fmovm.x		&0x10,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_4:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_5
+	fmovm.x		&0x08,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_5:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_6
+	fmovm.x		&0x04,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_6:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_7
+	fmovm.x		&0x02,(0x4+0x8,%a6,%d0)
+	addi.l		&0xc,%d0
+fm_7:
+	lsl.b		&0x1,%d1
+	bpl.b		fm_end
+	fmovm.x		&0x01,(0x4+0x8,%a6,%d0)
+fm_end:
+	mov.l		0x4(%sp),%d1
+	mov.l		0x8(%sp),%d0
+	mov.l		0xc(%sp),%a6
+	mov.l		(%sp)+,%sp
+
+	btst		&0x7,(%sp)		# is trace enabled?
+	beq.l		_fpsp_done
+	bra.l		_real_trace
+
+#########################################################################
+iea_fmovm_ctrl:
+
+	bsr.l		fmovm_ctrl		# load ctrl regs
+
+iea_fmovm_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	btst		&0x7,EXC_SR(%a6)	# is trace on?
+	bne.b		iea_fmovm_trace		# yes
+
+	mov.l		EXC_EXTWPTR(%a6),EXC_PC(%a6) # set Next PC
+
+	unlk		%a6			# unravel the frame
+
+	bra.l		_fpsp_done		# exit to os
+
+#
+# The control reg instruction that took an "Unimplemented Effective Address"
+# exception was being traced. The "Current PC" for the trace frame is the
+# PC stacked for Unimp EA. The "Next PC" is in EXC_EXTWPTR.
+# After fixing the stack frame, jump to _real_trace().
+#
+#		 UNIMP EA FRAME		   TRACE FRAME
+#		*****************	*****************
+#		* 0x0 *  0x0f0	*	*    Current	*
+#		*****************	*      PC	*
+#		*    Current	*	*****************
+#		*      PC	*	* 0x2 *  0x024	*
+#		*****************	*****************
+#		*      SR	*	*     Next	*
+#		*****************	*      PC	*
+#					*****************
+#					*      SR	*
+#					*****************
+# this ain't a pretty solution, but it works:
+# -restore a6 (not with unlk)
+# -shift stack frame down over where old a6 used to be
+# -add LOCAL_SIZE to stack pointer
+iea_fmovm_trace:
+	mov.l		(%a6),%a6		# restore frame pointer
+	mov.w		EXC_SR+LOCAL_SIZE(%sp),0x0+LOCAL_SIZE(%sp)
+	mov.l		EXC_PC+LOCAL_SIZE(%sp),0x8+LOCAL_SIZE(%sp)
+	mov.l		EXC_EXTWPTR+LOCAL_SIZE(%sp),0x2+LOCAL_SIZE(%sp)
+	mov.w		&0x2024,0x6+LOCAL_SIZE(%sp) # stk fmt = 0x2; voff = 0x024
+	add.l		&LOCAL_SIZE,%sp		# clear stack frame
+
+	bra.l		_real_trace
+
+#########################################################################
+# The FPU is disabled and so we should really have taken the "Line
+# F Emulator" exception. So, here we create an 8-word stack frame
+# from our 4-word stack frame. This means we must calculate the length
+# of the faulting instruction to get the "next PC". This is trivial for
+# immediate operands but requires some extra work for fmovm dynamic
+# which can use most addressing modes.
+iea_disabled:
+	mov.l		(%sp)+,%d0		# restore d0
+
+	link		%a6,&-LOCAL_SIZE	# init stack frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+
+# PC of instruction that took the exception is the PC in the frame
+	mov.l		EXC_PC(%a6),EXC_EXTWPTR(%a6)
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)	# store OPWORD and EXTWORD
+
+	tst.w		%d0			# is instr fmovm?
+	bmi.b		iea_dis_fmovm		# yes
+# instruction is using an extended precision immediate operand. therefore,
+# the total instruction length is 16 bytes.
+iea_dis_immed:
+	mov.l		&0x10,%d0		# 16 bytes of instruction
+	bra.b		iea_dis_cont
+iea_dis_fmovm:
+	btst		&0xe,%d0		# is instr fmovm ctrl
+	bne.b		iea_dis_fmovm_data	# no
+# the instruction is a fmovm.l with 2 or 3 registers.
+	bfextu		%d0{&19:&3},%d1
+	mov.l		&0xc,%d0
+	cmpi.b		%d1,&0x7		# move all regs?
+	bne.b		iea_dis_cont
+	addq.l		&0x4,%d0
+	bra.b		iea_dis_cont
+# the instruction is an fmovm.x dynamic which can use many addressing
+# modes and thus can have several different total instruction lengths.
+# call fmovm_calc_ea which will go through the ea calc process and,
+# as a by-product, will tell us how long the instruction is.
+iea_dis_fmovm_data:
+	clr.l		%d0
+	bsr.l		fmovm_calc_ea
+	mov.l		EXC_EXTWPTR(%a6),%d0
+	sub.l		EXC_PC(%a6),%d0
+iea_dis_cont:
+	mov.w		%d0,EXC_VOFF(%a6)	# store stack shift value
+
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+# here, we actually create the 8-word frame from the 4-word frame,
+# with the "next PC" as additional info.
+# the <ea> field is left as undefined.
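+# (the 0x402c written below is the frame format/vector-offset word:
+# format 0x4 -- the 8-word frame -- with vector offset 0x02c, i.e.
+# vector 11, "Line F emulator"; _fpsp_fline tests for this same value.)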
+	subq.l		&0x8,%sp		# make room for new stack
+	mov.l		%d0,-(%sp)		# save d0
+	mov.w		0xc(%sp),0x4(%sp)	# move SR
+	mov.l		0xe(%sp),0x6(%sp)	# move Current PC
+	clr.l		%d0
+	mov.w		0x12(%sp),%d0
+	mov.l		0x6(%sp),0x10(%sp)	# move Current PC
+	add.l		%d0,0x6(%sp)		# make Next PC
+	mov.w		&0x402c,0xa(%sp)	# insert offset,frame format
+	mov.l		(%sp)+,%d0		# restore d0
+
+	bra.l		_real_fpu_disabled
+
+##########
+
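+# the instruction fetch failed. convert the current frame into a
+# format-0x4 access error frame: grow the stack by 8 bytes, slide
+# SR/PC down, set the vector-offset word to 0x4008 (vector 2, access
+# error), reuse the stacked PC as the faulting address, and store a
+# canned fault-status-longword.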
+iea_iacc:
+	movc		%pcr,%d0
+	btst		&0x1,%d0
+	bne.b		iea_iacc_cont
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1 on stack
+iea_iacc_cont:
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	subq.w		&0x8,%sp		# make stack frame bigger
+	mov.l		0x8(%sp),(%sp)		# store SR,hi(PC)
+	mov.w		0xc(%sp),0x4(%sp)	# store lo(PC)
+	mov.w		&0x4008,0x6(%sp)	# store voff
+	mov.l		0x2(%sp),0x8(%sp)	# store ea
+	mov.l		&0x09428001,0xc(%sp)	# store fslw
+
+iea_acc_done:
+	btst		&0x5,(%sp)		# user or supervisor mode?
+	beq.b		iea_acc_done2		# user
+	bset		&0x2,0xd(%sp)		# set supervisor TM bit
+
+iea_acc_done2:
+	bra.l		_real_access
+
+iea_dacc:
+	lea		-LOCAL_SIZE(%a6),%sp
+
+	movc		%pcr,%d1
+	btst		&0x1,%d1
+	bne.b		iea_dacc_cont
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1 on stack
+	fmovm.l		LOCAL_SIZE+USER_FPCR(%sp),%fpcr,%fpsr,%fpiar # restore ctrl regs
+iea_dacc_cont:
+	mov.l		(%a6),%a6
+
+	mov.l		0x4+LOCAL_SIZE(%sp),-0x8+0x4+LOCAL_SIZE(%sp)
+	mov.w		0x8+LOCAL_SIZE(%sp),-0x8+0x8+LOCAL_SIZE(%sp)
+	mov.w		&0x4008,-0x8+0xa+LOCAL_SIZE(%sp)
+	mov.l		%a0,-0x8+0xc+LOCAL_SIZE(%sp)
+	mov.w		%d0,-0x8+0x10+LOCAL_SIZE(%sp)
+	mov.w		&0x0001,-0x8+0x12+LOCAL_SIZE(%sp)
+
+	movm.l		LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1
+	add.w		&LOCAL_SIZE-0x4,%sp
+
+	bra.b		iea_acc_done
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_operr(): 060FPSP entry point for FP Operr exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Operand Error exception in an operating system.		#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	_real_operr() - "callout" to operating system operr handler	#
+#	_dmem_write_{byte,word,long}() - store data to mem (opclass 3)	#
+#	store_dreg_{b,w,l}() - store data to data regfile (opclass 3)	#
+#	facc_out_{b,w,l}() - store to memory took access error (opcl 3)	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Operr exception frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	No access error:						#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the FP Operr exception is enabled, the goal	#
+# is to get to the handler specified at _real_operr(). But, on the 060,	#
+# for opclass zero and two instructions taking this exception, the	#
+# input operand in the fsave frame may be incorrect for some cases	#
+# and needs to be corrected. This handler calls fix_skewed_ops() to	#
+# do just this and then exits through _real_operr().			#
+#	For opclass 3 instructions, the 060 doesn't store the default	#
+# operr result out to memory or data register file as it should.	#
+# This code must emulate the move out before finally exiting through	#
+# _real_operr(). The move out, if to memory, is performed using	#
+# _mem_write() "callout" routines that may return a failing result.	#
+# In this special case, the handler must exit through facc_out()	#
+# which creates an access error stack frame from the current operr	#
+# stack frame.								#
+#									#
+#########################################################################
+
+	global		_fpsp_operr
+_fpsp_operr:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&13,%d0			# is instr an fmove out?
+	bne.b		foperr_out		# fmove out
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source infinity or
+# denorm operand in the sgl or dbl format. NANs also become skewed, but can't
+# cause an operr so we don't need to check for them here.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+foperr_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_operr
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# operand error exceptions. we do this here before passing control to
+# the user operand error handler.
+#
+# byte, word, and long destination format operations can pass
+# through here. we simply need to test the sign of the src
+# operand and save the appropriate minimum or maximum integer value
+# to the location pointed to by the stacked effective address.
+#
+# although packed opclass three operations can take operand error
+# exceptions, they won't pass through here since they are caught
+# first by the unsupported data format exception handler. that handler
+# sends them directly to _real_operr() if necessary.
+#
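+# (worked example: a positive normal source with a long destination
+# stores 0x7fffffff; a negative one trips the addq below and stores
+# 0x80000000. an infinity takes the same min/max path, while a QNAN
+# passes its high mantissa longword through as the result. the byte
+# and word cases then pick up the most significant byte/word of the
+# saved longword, i.e. 0x7f/0x80 and 0x7fff/0x8000.)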
+foperr_out:
+
+	mov.w		FP_SRC_EX(%a6),%d1	# fetch exponent
+	andi.w		&0x7fff,%d1
+	cmpi.w		%d1,&0x7fff
+	bne.b		foperr_out_not_qnan
+# the operand is either an infinity or a QNAN.
+	tst.l		FP_SRC_LO(%a6)
+	bne.b		foperr_out_qnan
+	mov.l		FP_SRC_HI(%a6),%d1
+	andi.l		&0x7fffffff,%d1
+	beq.b		foperr_out_not_qnan
+foperr_out_qnan:
+	mov.l		FP_SRC_HI(%a6),L_SCR1(%a6)
+	bra.b		foperr_out_jmp
+
+foperr_out_not_qnan:
+	mov.l		&0x7fffffff,%d1
+	tst.b		FP_SRC_EX(%a6)
+	bpl.b		foperr_out_not_qnan2
+	addq.l		&0x1,%d1
+foperr_out_not_qnan2:
+	mov.l		%d1,L_SCR1(%a6)
+
+foperr_out_jmp:
+	bfextu		%d0{&19:&3},%d0		# extract dst format field
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract <ea> mode,reg
+	mov.w		(tbl_operr.b,%pc,%d0.w*2),%a0
+	jmp		(tbl_operr.b,%pc,%a0)
+
+tbl_operr:
+	short		foperr_out_l - tbl_operr # long word integer
+	short		tbl_operr    - tbl_operr # sgl prec shouldn't happen
+	short		tbl_operr    - tbl_operr # ext prec shouldn't happen
+	short		foperr_exit  - tbl_operr # packed won't enter here
+	short		foperr_out_w - tbl_operr # word integer
+	short		tbl_operr    - tbl_operr # dbl prec shouldn't happen
+	short		foperr_out_b - tbl_operr # byte integer
+	short		tbl_operr    - tbl_operr # packed won't enter here
+
+foperr_out_b:
+	mov.b		L_SCR1(%a6),%d0		# load positive default result
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		foperr_out_b_save_dn	# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_byte	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_b		# yes
+
+	bra.w		foperr_exit
+foperr_out_b_save_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_b		# store result to regfile
+	bra.w		foperr_exit
+
+foperr_out_w:
+	mov.w		L_SCR1(%a6),%d0		# load positive default result
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		foperr_out_w_save_dn	# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_word	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_w		# yes
+
+	bra.w		foperr_exit
+foperr_out_w_save_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_w		# store result to regfile
+	bra.w		foperr_exit
+
+foperr_out_l:
+	mov.l		L_SCR1(%a6),%d0		# load positive default result
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		foperr_out_l_save_dn	# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_long	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.w		foperr_exit
+foperr_out_l_save_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_l		# store result to regfile
+	bra.w		foperr_exit
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_snan(): 060FPSP entry point for FP SNAN exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Signalling NAN exception in an operating system.		#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	_real_snan() - "callout" to operating system SNAN handler	#
+#	_dmem_write_{byte,word,long}() - store data to mem (opclass 3)	#
+#	store_dreg_{b,w,l}() - store data to data regfile (opclass 3)	#
+#	facc_out_{b,w,l,d,x}() - store to mem took acc error (opcl 3)	#
+#	_calc_ea_fout() - fix An if <ea> is -() or ()+; also get <ea>	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP SNAN exception frame		#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	No access error:						#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the FP SNAN exception is enabled, the goal	#
+# is to get to the handler specified at _real_snan(). But, on the 060,	#
+# for opclass zero and two instructions taking this exception, the	#
+# input operand in the fsave frame may be incorrect for some cases	#
+# and needs to be corrected. This handler calls fix_skewed_ops() to	#
+# do just this and then exits through _real_snan().			#
+#	For opclass 3 instructions, the 060 doesn't store the default	#
+# SNAN result out to memory or data register file as it should.		#
+# This code must emulate the move out before finally exiting through	#
+# _real_snan(). The move out, if to memory, is performed using		#
+# _mem_write() "callout" routines that may return a failing result.	#
+# In this special case, the handler must exit through facc_out()	#
+# which creates an access error stack frame from the current SNAN	#
+# stack frame.								#
+#	For the case of an extended precision opclass 3 instruction,	#
+# if the effective addressing mode was -() or ()+, then the address	#
+# register must get updated by calling _calc_ea_fout(). If the <ea>	#
+# was -(a7) from supervisor mode, then the exception frame currently	#
+# on the system stack must be carefully moved "down" to make room	#
+# for the operand being moved.						#
+#									#
+#########################################################################
+
+	global		_fpsp_snan
+_fpsp_snan:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&13,%d0			# is instr an fmove out?
+	bne.w		fsnan_out		# fmove out
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source infinity or
+# denorm operand in the sgl or dbl format. NANs also become skewed and must be
+# fixed here.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+fsnan_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_snan
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# snan exceptions. we do this here before passing control to
+# the user snan handler.
+#
+# byte, word, long, single, double, extended, and packed destination
+# format operations can pass through here. packed format operations
+# were already handled by fpsp_unsupp(), so we need to do nothing else
+# for them here.
+# for the other formats, we build the default result -- the source
+# NAN's upper mantissa bits with the SNAN bit quieted -- and save it
+# to the location pointed to by the stacked effective address.
+#
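+# (worked example: for a byte destination, an SNAN whose high mantissa
+# longword is 0x12345678 yields 0x12 with bit 6 set, i.e. 0x52 -- the
+# NAN is "quieted" by setting the top fraction bit of the destination
+# size, hence the bset of bit 6/14/30 for b/w/l below.)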
+fsnan_out:
+
+	bfextu		%d0{&19:&3},%d0		# extract dst format field
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract <ea> mode,reg
+	mov.w		(tbl_snan.b,%pc,%d0.w*2),%a0
+	jmp		(tbl_snan.b,%pc,%a0)
+
+tbl_snan:
+	short		fsnan_out_l - tbl_snan # long word integer
+	short		fsnan_out_s - tbl_snan # sgl prec
+	short		fsnan_out_x - tbl_snan # ext prec
+	short		tbl_snan    - tbl_snan # packed needs no help
+	short		fsnan_out_w - tbl_snan # word integer
+	short		fsnan_out_d - tbl_snan # dbl prec
+	short		fsnan_out_b - tbl_snan # byte integer
+	short		tbl_snan    - tbl_snan # packed needs no help
+
+fsnan_out_b:
+	mov.b		FP_SRC_HI(%a6),%d0	# load upper byte of SNAN
+	bset		&6,%d0			# set SNAN bit
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_b_dn		# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_byte	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_b		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_b_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_b		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_w:
+	mov.w		FP_SRC_HI(%a6),%d0	# load upper word of SNAN
+	bset		&14,%d0			# set SNAN bit
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_w_dn		# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_word	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_w		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_w_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_w		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_l:
+	mov.l		FP_SRC_HI(%a6),%d0	# load upper longword of SNAN
+	bset		&30,%d0			# set SNAN bit
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_l_dn		# yes
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_long	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_l_dn:
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_l		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_s:
+	cmpi.b		%d1,&0x7		# is <ea> mode a data reg?
+	ble.b		fsnan_out_d_dn		# yes
+	mov.l		FP_SRC_EX(%a6),%d0	# fetch SNAN sign
+	andi.l		&0x80000000,%d0		# keep sign
+	ori.l		&0x7fc00000,%d0		# insert new exponent,SNAN bit
+	mov.l		FP_SRC_HI(%a6),%d1	# load mantissa
+	lsr.l		&0x8,%d1		# shift mantissa for sgl
+	or.l		%d1,%d0			# create sgl SNAN
+	mov.l		EXC_EA(%a6),%a0		# pass: <ea> of default result
+	bsr.l		_dmem_write_long	# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.w		fsnan_exit
+fsnan_out_d_dn:
+	mov.l		FP_SRC_EX(%a6),%d0	# fetch SNAN sign
+	andi.l		&0x80000000,%d0		# keep sign
+	ori.l		&0x7fc00000,%d0		# insert new exponent,SNAN bit
+	mov.l		%d1,-(%sp)
+	mov.l		FP_SRC_HI(%a6),%d1	# load mantissa
+	lsr.l		&0x8,%d1		# shift mantissa for sgl
+	or.l		%d1,%d0			# create sgl SNAN
+	mov.l		(%sp)+,%d1
+	andi.w		&0x0007,%d1
+	bsr.l		store_dreg_l		# store result to regfile
+	bra.w		fsnan_exit
+
+fsnan_out_d:
+	mov.l		FP_SRC_EX(%a6),%d0	# fetch SNAN sign
+	andi.l		&0x80000000,%d0		# keep sign
+	ori.l		&0x7ff80000,%d0		# insert new exponent,SNAN bit
+	mov.l		FP_SRC_HI(%a6),%d1	# load hi mantissa
+	mov.l		%d0,FP_SCR0_EX(%a6)	# store to temp space
+	mov.l		&11,%d0			# load shift amt
+	lsr.l		%d0,%d1
+	or.l		%d1,FP_SCR0_EX(%a6)	# create dbl hi
+	mov.l		FP_SRC_HI(%a6),%d1	# load hi mantissa
+	andi.l		&0x000007ff,%d1
+	ror.l		%d0,%d1
+	mov.l		%d1,FP_SCR0_HI(%a6)	# store to temp space
+	mov.l		FP_SRC_LO(%a6),%d1	# load lo mantissa
+	lsr.l		%d0,%d1
+	or.l		%d1,FP_SCR0_HI(%a6)	# create dbl lo
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	movq.l		&0x8,%d0		# pass: size of 8 bytes
+	bsr.l		_dmem_write		# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	bra.w		fsnan_exit
+
+# for extended precision, if the addressing mode is pre-decrement or
+# post-increment, then the address register did not get updated.
+# in addition, for pre-decrement, the stacked <ea> is incorrect.
+fsnan_out_x:
+	clr.b		SPCOND_FLG(%a6)		# clear special case flag
+
+	mov.w		FP_SRC_EX(%a6),FP_SCR0_EX(%a6)
+	clr.w		2+FP_SCR0(%a6)
+	mov.l		FP_SRC_HI(%a6),%d0
+	bset		&30,%d0
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		FP_SRC_LO(%a6),FP_SCR0_LO(%a6)
+
+	btst		&0x5,EXC_SR(%a6)	# supervisor mode exception?
+	bne.b		fsnan_out_x_s		# yes
+
+	mov.l		%usp,%a0		# fetch user stack pointer
+	mov.l		%a0,EXC_A7(%a6)		# save on stack for calc_ea()
+	mov.l		(%a6),EXC_A6(%a6)
+
+	bsr.l		_calc_ea_fout		# find the correct ea,update An
+	mov.l		%a0,%a1
+	mov.l		%a0,EXC_EA(%a6)		# stack correct <ea>
+
+	mov.l		EXC_A7(%a6),%a0
+	mov.l		%a0,%usp		# restore user stack pointer
+	mov.l		EXC_A6(%a6),(%a6)
+
+fsnan_out_x_save:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	movq.l		&0xc,%d0		# pass: size of extended
+	bsr.l		_dmem_write		# write the default result
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_x		# yes
+
+	bra.w		fsnan_exit
+
+fsnan_out_x_s:
+	mov.l		(%a6),EXC_A6(%a6)
+
+	bsr.l		_calc_ea_fout		# find the correct ea,update An
+	mov.l		%a0,%a1
+	mov.l		%a0,EXC_EA(%a6)		# stack correct <ea>
+
+	mov.l		EXC_A6(%a6),(%a6)
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
+	bne.b		fsnan_out_x_save	# no
+
+# the operation was "fmove.x SNAN,-(a7)" from supervisor mode.
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	mov.l		EXC_A6(%a6),%a6		# restore frame pointer
+
+	mov.l		LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_PC+0x2(%sp),LOCAL_SIZE+EXC_PC+0x2-0xc(%sp)
+	mov.l		LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+	mov.l		LOCAL_SIZE+FP_SCR0_EX(%sp),LOCAL_SIZE+EXC_SR(%sp)
+	mov.l		LOCAL_SIZE+FP_SCR0_HI(%sp),LOCAL_SIZE+EXC_PC+0x2(%sp)
+	mov.l		LOCAL_SIZE+FP_SCR0_LO(%sp),LOCAL_SIZE+EXC_EA(%sp)
+
+	add.l		&LOCAL_SIZE-0x8,%sp
+
+	bra.l		_real_snan
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_inex(): 060FPSP entry point for FP Inexact exception.	#
+#									#
+#	This handler should be the first code executed upon taking the	#
+#	FP Inexact exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword			#
+#	fix_skewed_ops() - adjust src operand in fsave frame		#
+#	set_tag_x() - determine optype of src/dst operands		#
+#	store_fpreg() - store opclass 0 or 2 result to FP regfile	#
+#	unnorm_fix() - change UNNORM operands to NORM or ZERO		#
+#	load_fpn2() - load dst operand from FP regfile			#
+#	smovcr() - emulate an "fmovcr" instruction			#
+#	fout() - emulate an opclass 3 instruction			#
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+#	_real_inex() - "callout" to operating system inexact handler	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP Inexact exception frame	#
+#	- The fsave frame contains the source operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	- The system stack is unchanged					#
+#	- The fsave frame contains the adjusted src op for opclass 0,2	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the FP Inexact exception is enabled, the goal	#
+# is to get to the handler specified at _real_inex(). But, on the 060,	#
+# for opclass zero and two instructions taking this exception, the	#
+# hardware doesn't store the correct result to the destination FP	#
+# register as did the '040 and '881/2. This handler must emulate the	#
+# instruction in order to get this value and then store it to the	#
+# correct register before calling _real_inex().				#
+#	For opclass 3 instructions, the 060 doesn't store the default	#
+# inexact result out to memory or data register file as it should.	#
+# This code must emulate the move out by calling fout() before finally	#
+# exiting through _real_inex().						#
+#									#
+#########################################################################
+
+	global		_fpsp_inex
+_fpsp_inex:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+	btst		&13,%d0			# is instr an fmove out?
+	bne.w		finex_out		# fmove out
+
+
+# the hardware, for "fabs" and "fneg" w/ a long source format, puts the
+# longword integer directly into the upper longword of the mantissa along
+# w/ an exponent value of 0x401e. we convert this to extended precision here.
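+# (e.g. a long source of 5 shows up in the frame as exponent 0x401e
+# with 0x00000005 in the upper mantissa longword -- an unnormalized
+# encoding of 5; the fmov.l below reloads it as an integer, yielding
+# the normalized extended value 0x4001 with mantissa 0xa0000000,00000000.)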
+	bfextu		%d0{&19:&3},%d0		# fetch instr size
+	bne.b		finex_cont		# instr size is not long
+	cmpi.w		FP_SRC_EX(%a6),&0x401e	# is exponent 0x401e?
+	bne.b		finex_cont		# no
+	fmov.l		&0x0,%fpcr
+	fmov.l		FP_SRC_HI(%a6),%fp0	# load integer src
+	fmov.x		%fp0,FP_SRC(%a6)	# store integer as extended precision
+	mov.w		&0xe001,0x2+FP_SRC(%a6)
+
+finex_cont:
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+# Here, we zero the ccode and exception byte field since we're going to
+# emulate the whole instruction. Notice, though, that we don't kill the
+# INEX1 bit. This is because a packed op has long since been converted
+# to extended before arriving here. Therefore, we need to retain the
+# INEX1 bit from when the operand was first converted.
+	andi.l		&0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
+
+	fmov.l		&0x0,%fpcr		# zero current control regs
+	fmov.l		&0x0,%fpsr
+
+	bfextu		EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg
+	cmpi.b		%d1,&0x17		# is op an fmovecr?
+	beq.w		finex_fmovcr		# yes
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		set_tag_x		# tag the operand type
+	mov.b		%d0,STAG(%a6)		# maybe NORM,DENORM
+
+# bits four and five of the fp extension word separate the monadic and dyadic
+# operations that can pass through fpsp_inex(). remember that fcmp and ftst
+# will never take this exception, but fsincos will.
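+# (e.g. fadd = 0x22 has bit 5 set (dyadic) while fsqrt = 0x04 does not
+# (monadic); fsincos = 0x30-0x37 also has bit 4 set, so the second test
+# below routes it to the monadic path.)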
+	btst		&0x5,1+EXC_CMDREG(%a6)	# is operation monadic or dyadic?
+	beq.b		finex_extract		# monadic
+
+	btst		&0x4,1+EXC_CMDREG(%a6)	# is operation an fsincos?
+	bne.b		finex_extract		# yes
+
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+	bsr.l		load_fpn2		# load dst into FP_DST
+
+	lea		FP_DST(%a6),%a0		# pass: ptr to dst op
+	bsr.l		set_tag_x		# tag the operand type
+	cmpi.b		%d0,&UNNORM		# is operand an UNNORM?
+	bne.b		finex_op2_done		# no
+	bsr.l		unnorm_fix		# yes; convert to NORM,DENORM,or ZERO
+finex_op2_done:
+	mov.b		%d0,DTAG(%a6)		# save dst optype tag
+
+finex_extract:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec/mode
+
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.w		&0x007f,%d1		# extract extension
+
+	lea		FP_SRC(%a6),%a0
+	lea		FP_DST(%a6),%a1
+
+	mov.l		(tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+	jsr		(tbl_unsupp.l,%pc,%d1.l*1)
+
+# the operation has been emulated. the result is in fp0.
+finex_save:
+	bfextu		EXC_CMDREG(%a6){&6:&3},%d0
+	bsr.l		store_fpreg
+
+finex_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_inex
+
+finex_fmovcr:
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec,mode
+	mov.b		1+EXC_CMDREG(%a6),%d1
+	andi.l		&0x0000007f,%d1		# pass rom offset
+	bsr.l		smovcr
+	bra.b		finex_save
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# inexact exceptions. we do this here before passing control to
+# the user inexact handler.
+#
+# byte, word, and long destination format operations can pass
+# through here. so can double and single precision.
+# although packed opclass three operations can take inexact
+# exceptions, they won't pass through here since they are caught
+# first by the unsupported data format exception handler. that handler
+# sends them directly to _real_inex() if necessary.
+#
+finex_out:
+
+	mov.b		&NORM,STAG(%a6)		# src is a NORM
+
+	clr.l		%d0
+	mov.b		FPCR_MODE(%a6),%d0	# pass rnd prec,mode
+
+	andi.l		&0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+	lea		FP_SRC(%a6),%a0		# pass ptr to src operand
+
+	bsr.l		fout			# store the default result
+
+	bra.b		finex_exit
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_dz(): 060FPSP entry point for FP DZ exception.		#
+#									#
+#	This handler should be the first code executed upon taking	#
+#	the FP DZ exception in an operating system.			#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read instruction longword from memory	#
+#	fix_skewed_ops() - adjust fsave operand				#
+#	_real_dz() - "callout" exit point from FP DZ handler		#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains the FP DZ exception stack.		#
+#	- The fsave frame contains the source operand.			#
+#									#
+# OUTPUT **************************************************************	#
+#	- The system stack contains the FP DZ exception stack.		#
+#	- The fsave frame contains the adjusted source operand.		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	In a system where the DZ exception is enabled, the goal is to	#
+# get to the handler specified at _real_dz(). But, on the 060, when the	#
+# exception is taken, the input operand in the fsave state frame may	#
+# be incorrect for some cases and need to be adjusted. So, this package	#
+# adjusts the operand using fix_skewed_ops() and then branches to	#
+# _real_dz().								#
+#									#
+#########################################################################
+
+	global		_fpsp_dz
+_fpsp_dz:
+
+	link.w		%a6,&-LOCAL_SIZE	# init stack frame
+
+	fsave		FP_SRC(%a6)		# grab the "busy" frame
+
+	movm.l		&0x0303,EXC_DREGS(%a6)	# save d0-d1/a0-a1
+	fmovm.l		%fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+	fmovm.x		&0xc0,EXC_FPREGS(%a6)	# save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+	mov.l		USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch the instruction words
+	mov.l		%d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source zero
+# in the sgl or dbl format.
+	lea		FP_SRC(%a6),%a0		# pass: ptr to src op
+	bsr.l		fix_skewed_ops		# fix src op
+
+fdz_exit:
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	frestore	FP_SRC(%a6)
+
+	unlk		%a6
+	bra.l		_real_dz
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_fpsp_fline(): 060FPSP entry point for "Line F emulator"	#
+#		       exception when the "reduced" version of the	#
+#		       FPSP is implemented that does not emulate	#
+#		       FP unimplemented instructions.			#
+#									#
+#	This handler should be the first code executed upon taking a	#
+#	"Line F Emulator" exception in an operating system integrating	#
+#	the reduced version of 060FPSP.					#
+#									#
+# XREF ****************************************************************	#
+#	_real_fpu_disabled() - Handle "FPU disabled" exceptions		#
+#	_real_fline() - Handle all other cases (treated equally)	#
+#									#
+# INPUT ***************************************************************	#
+#	- The system stack contains a "Line F Emulator" exception	#
+#	  stack frame.							#
+#									#
+# OUTPUT **************************************************************	#
+#	- The system stack is unchanged.				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	When a "Line F Emulator" exception occurs in a system where	#
+# "FPU Unimplemented" instructions will not be emulated, the exception	#
+# can occur because the FPU is disabled or the instruction is to be	#
+# classified as "Line F". This module determines which case exists and	#
+# calls the appropriate "callout".					#
+#									#
+#########################################################################
+
+	global		_fpsp_fline
+_fpsp_fline:
+
+# check to see if the FPU is disabled. if so, jump to the OS entry
+# point for that condition.
+	cmpi.w		0x6(%sp),&0x402c
+	beq.l		_real_fpu_disabled
+
+	bra.l		_real_fline
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_dcalc_ea(): calc correct <ea> from <ea> stacked on exception	#
+#									#
+# XREF ****************************************************************	#
+#	inc_areg() - increment an address register			#
+#	dec_areg() - decrement an address register			#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = number of bytes to adjust <ea> by				#
+#									#
+# OUTPUT **************************************************************	#
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+# "Dummy" CALCulate Effective Address:					#
+#	The stacked <ea> for FP unimplemented instructions and opclass	#
+#	two packed instructions is correct with the exception of...	#
+#									#
+#	1) -(An)   : The register is not updated regardless of size.	#
+#		     Also, for extended precision and packed, the	#
+#		     stacked <ea> value is 8 bytes too big		#
+#	2) (An)+   : The register is not updated.			#
+#	3) #<data> : The upper longword of the immediate operand is	#
+#		     stacked. b, w, l, and s sizes are completely	#
+#		     stacked; d, x, and p are not.			#
+#									#
+#########################################################################
+
+	global		_dcalc_ea
+_dcalc_ea:
+	mov.l		%d0, %a0		# move # bytes to %a0
+
+	mov.b		1+EXC_OPWORD(%a6), %d0	# fetch opcode word
+	mov.l		%d0, %d1		# make a copy
+
+	andi.w		&0x38, %d0		# extract mode field
+	andi.l		&0x7, %d1		# extract reg  field
+
+	cmpi.b		%d0,&0x18		# is mode (An)+ ?
+	beq.b		dcea_pi			# yes
+
+	cmpi.b		%d0,&0x20		# is mode -(An) ?
+	beq.b		dcea_pd			# yes
+
+	or.w		%d1,%d0			# concat mode,reg
+	cmpi.b		%d0,&0x3c		# is mode #<data>?
+
+	beq.b		dcea_imm		# yes
+
+	mov.l		EXC_EA(%a6),%a0		# return <ea>
+	rts
+
+# need to set immediate data flag here since we'll need to do
+# an imem_read to fetch this later.
+dcea_imm:
+	mov.b		&immed_flg,SPCOND_FLG(%a6)
+	lea		([USER_FPIAR,%a6],0x4),%a0 # return <ea> of immediate
+	rts
+
+# here, the <ea> is stacked correctly. however, we must update the
+# address register...
+dcea_pi:
+	mov.l		%a0,%d0			# pass amt to inc by
+	bsr.l		inc_areg		# inc addr register
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	rts
+
+# the <ea> is stacked correctly for all but extended and packed which
+# the <ea>s are 8 bytes too large.
+# it would make no sense to have a pre-decrement to a7 in supervisor
+# mode so we don't even worry about this tricky case here : )
+dcea_pd:
+	mov.l		%a0,%d0			# pass amt to dec by
+	bsr.l		dec_areg		# dec addr register
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+
+	cmpi.b		%d0,&0xc		# is opsize ext or packed?
+	beq.b		dcea_pd2		# yes
+	rts
+dcea_pd2:
+	sub.l		&0x8,%a0		# correct <ea>
+	mov.l		%a0,EXC_EA(%a6)		# put correct <ea> on stack
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_calc_ea_fout(): calculate correct stacked <ea> for extended	#
+#			 and packed data opclass 3 operations.		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = return correct effective address				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	For opclass 3 extended and packed data operations, the <ea>	#
+# stacked for the exception is incorrect for -(an) and (an)+ addressing	#
+# modes. Also, while we're at it, the address register itself must get	#
+# updated.								#
+#	So, for -(an), we must subtract 8 off of the stacked <ea> value	#
+# and return that value as the correct <ea> and store that value in An.	#
+# For (an)+, the stacked <ea> is correct but we must adjust An by +12.	#
+#									#
+#########################################################################
+
+# This calc_ea is currently used to retrieve the correct <ea>
+# for fmove outs of type extended and packed.
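+# (worked example: for "fmove.x fp0,-(a2)" with a2 = 0x1000, the
+# exception stacks <ea> = 0xffc, 8 too big; the true destination is
+# 0xff4 = 0x1000 - 12, and ceaf_pd2 below writes that corrected value
+# into both a2 and the stacked <ea>.)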
+	global		_calc_ea_fout
+_calc_ea_fout:
+	mov.b		1+EXC_OPWORD(%a6),%d0	# fetch opcode word
+	mov.l		%d0,%d1			# make a copy
+
+	andi.w		&0x38,%d0		# extract mode field
+	andi.l		&0x7,%d1		# extract reg  field
+
+	cmpi.b		%d0,&0x18		# is mode (An)+ ?
+	beq.b		ceaf_pi			# yes
+
+	cmpi.b		%d0,&0x20		# is mode -(An) ?
+	beq.w		ceaf_pd			# yes
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	rts
+
+# (An)+ : extended and packed fmove out
+#	: stacked <ea> is correct
+#	: "An" not updated
+ceaf_pi:
+	mov.w		(tbl_ceaf_pi.b,%pc,%d1.w*2),%d1
+	mov.l		EXC_EA(%a6),%a0
+	jmp		(tbl_ceaf_pi.b,%pc,%d1.w*1)
+
+	swbeg		&0x8
+tbl_ceaf_pi:
+	short		ceaf_pi0 - tbl_ceaf_pi
+	short		ceaf_pi1 - tbl_ceaf_pi
+	short		ceaf_pi2 - tbl_ceaf_pi
+	short		ceaf_pi3 - tbl_ceaf_pi
+	short		ceaf_pi4 - tbl_ceaf_pi
+	short		ceaf_pi5 - tbl_ceaf_pi
+	short		ceaf_pi6 - tbl_ceaf_pi
+	short		ceaf_pi7 - tbl_ceaf_pi
+
+ceaf_pi0:
+	addi.l		&0xc,EXC_DREGS+0x8(%a6)
+	rts
+ceaf_pi1:
+	addi.l		&0xc,EXC_DREGS+0xc(%a6)
+	rts
+ceaf_pi2:
+	add.l		&0xc,%a2
+	rts
+ceaf_pi3:
+	add.l		&0xc,%a3
+	rts
+ceaf_pi4:
+	add.l		&0xc,%a4
+	rts
+ceaf_pi5:
+	add.l		&0xc,%a5
+	rts
+ceaf_pi6:
+	addi.l		&0xc,EXC_A6(%a6)
+	rts
+ceaf_pi7:
+	mov.b		&mia7_flg,SPCOND_FLG(%a6)
+	addi.l		&0xc,EXC_A7(%a6)
+	rts
+
+# -(An) : extended and packed fmove out
+#	: stacked <ea> = actual <ea> + 8
+#	: "An" not updated
+ceaf_pd:
+	mov.w		(tbl_ceaf_pd.b,%pc,%d1.w*2),%d1
+	mov.l		EXC_EA(%a6),%a0
+	sub.l		&0x8,%a0
+	sub.l		&0x8,EXC_EA(%a6)
+	jmp		(tbl_ceaf_pd.b,%pc,%d1.w*1)
+
+	swbeg		&0x8
+tbl_ceaf_pd:
+	short		ceaf_pd0 - tbl_ceaf_pd
+	short		ceaf_pd1 - tbl_ceaf_pd
+	short		ceaf_pd2 - tbl_ceaf_pd
+	short		ceaf_pd3 - tbl_ceaf_pd
+	short		ceaf_pd4 - tbl_ceaf_pd
+	short		ceaf_pd5 - tbl_ceaf_pd
+	short		ceaf_pd6 - tbl_ceaf_pd
+	short		ceaf_pd7 - tbl_ceaf_pd
+
+ceaf_pd0:
+	mov.l		%a0,EXC_DREGS+0x8(%a6)
+	rts
+ceaf_pd1:
+	mov.l		%a0,EXC_DREGS+0xc(%a6)
+	rts
+ceaf_pd2:
+	mov.l		%a0,%a2
+	rts
+ceaf_pd3:
+	mov.l		%a0,%a3
+	rts
+ceaf_pd4:
+	mov.l		%a0,%a4
+	rts
+ceaf_pd5:
+	mov.l		%a0,%a5
+	rts
+ceaf_pd6:
+	mov.l		%a0,EXC_A6(%a6)
+	rts
+ceaf_pd7:
+	mov.l		%a0,EXC_A7(%a6)
+	mov.b		&mda7_flg,SPCOND_FLG(%a6)
+	rts
+
+#
+# This table holds the offsets of the emulation routines for each individual
+# math operation relative to the address of this table. Included are
+# routines like fadd/fmul/fabs. The transcendentals are NOT, because
+# this table is for the version of the 060FPSP without transcendentals.
+# The location within the table is determined by the extension bits of the
+# operation longword.
+#
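+# note: the "fin", "fsin", and "fdin" entries below are the entry
+# points for the fmove/fsmove/fdmove emulation routines ("move in"),
+# not trig routines.
+#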
+
+	swbeg		&109
+tbl_unsupp:
+	long		fin		- tbl_unsupp	# 00: fmove
+	long		fint		- tbl_unsupp	# 01: fint
+	long		tbl_unsupp	- tbl_unsupp	# 02: fsinh
+	long		fintrz		- tbl_unsupp	# 03: fintrz
+	long		fsqrt		- tbl_unsupp	# 04: fsqrt
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp	# 06: flognp1
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp	# 08: fetoxm1
+	long		tbl_unsupp	- tbl_unsupp	# 09: ftanh
+	long		tbl_unsupp	- tbl_unsupp	# 0a: fatan
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp	# 0c: fasin
+	long		tbl_unsupp	- tbl_unsupp	# 0d: fatanh
+	long		tbl_unsupp	- tbl_unsupp	# 0e: fsin
+	long		tbl_unsupp	- tbl_unsupp	# 0f: ftan
+	long		tbl_unsupp	- tbl_unsupp	# 10: fetox
+	long		tbl_unsupp	- tbl_unsupp	# 11: ftwotox
+	long		tbl_unsupp	- tbl_unsupp	# 12: ftentox
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp	# 14: flogn
+	long		tbl_unsupp	- tbl_unsupp	# 15: flog10
+	long		tbl_unsupp	- tbl_unsupp	# 16: flog2
+	long		tbl_unsupp	- tbl_unsupp
+	long		fabs		- tbl_unsupp	# 18: fabs
+	long		tbl_unsupp	- tbl_unsupp	# 19: fcosh
+	long		fneg		- tbl_unsupp	# 1a: fneg
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp	# 1c: facos
+	long		tbl_unsupp	- tbl_unsupp	# 1d: fcos
+	long		tbl_unsupp	- tbl_unsupp	# 1e: fgetexp
+	long		tbl_unsupp	- tbl_unsupp	# 1f: fgetman
+	long		fdiv		- tbl_unsupp	# 20: fdiv
+	long		tbl_unsupp	- tbl_unsupp	# 21: fmod
+	long		fadd		- tbl_unsupp	# 22: fadd
+	long		fmul		- tbl_unsupp	# 23: fmul
+	long		fsgldiv		- tbl_unsupp	# 24: fsgldiv
+	long		tbl_unsupp	- tbl_unsupp	# 25: frem
+	long		tbl_unsupp	- tbl_unsupp	# 26: fscale
+	long		fsglmul		- tbl_unsupp	# 27: fsglmul
+	long		fsub		- tbl_unsupp	# 28: fsub
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp	# 30: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 31: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 32: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 33: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 34: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 35: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 36: fsincos
+	long		tbl_unsupp	- tbl_unsupp	# 37: fsincos
+	long		fcmp		- tbl_unsupp	# 38: fcmp
+	long		tbl_unsupp	- tbl_unsupp
+	long		ftst		- tbl_unsupp	# 3a: ftst
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsin		- tbl_unsupp	# 40: fsmove
+	long		fssqrt		- tbl_unsupp	# 41: fssqrt
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdin		- tbl_unsupp	# 44: fdmove
+	long		fdsqrt		- tbl_unsupp	# 45: fdsqrt
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsabs		- tbl_unsupp	# 58: fsabs
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsneg		- tbl_unsupp	# 5a: fsneg
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdabs		- tbl_unsupp	# 5c: fdabs
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdneg		- tbl_unsupp	# 5e: fdneg
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsdiv		- tbl_unsupp	# 60: fsdiv
+	long		tbl_unsupp	- tbl_unsupp
+	long		fsadd		- tbl_unsupp	# 62: fsadd
+	long		fsmul		- tbl_unsupp	# 63: fsmul
+	long		fddiv		- tbl_unsupp	# 64: fddiv
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdadd		- tbl_unsupp	# 66: fdadd
+	long		fdmul		- tbl_unsupp	# 67: fdmul
+	long		fssub		- tbl_unsupp	# 68: fssub
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		tbl_unsupp	- tbl_unsupp
+	long		fdsub		- tbl_unsupp	# 6c: fdsub
+
+#################################################
+# Add this here so non-fp modules can compile.
+# (smovcr is called from fpsp_inex.)
+	global		smovcr
+smovcr:
+	bra.b		smovcr			# stub; loops if ever reached
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fmovm_dynamic(): emulate "fmovm" dynamic instruction		#
+#									#
+# XREF ****************************************************************	#
+#	fetch_dreg() - fetch data register				#
+#	{i,d,}mem_read() - fetch data from memory			#
+#	_mem_write() - write data to memory				#
+#	iea_iacc() - instruction memory access error occurred		#
+#	iea_dacc() - data memory access error occurred			#
+#	restore() - restore An index regs if access error occurred	#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If instr is "fmovm Dn,-(A7)" from supervisor mode,		#
+#		d0 = size of dump					#
+#		d1 = Dn							#
+#	Else if instruction access error,				#
+#		d0 = FSLW						#
+#	Else if data access error,					#
+#		d0 = FSLW						#
+#		a0 = address of fault					#
+#	Else								#
+#		none.							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The effective address must be calculated since this is entered	#
+# from an "Unimplemented Effective Address" exception handler. So, we	#
+# have our own fcalc_ea() routine here. If an access error is flagged	#
+# by a _{i,d,}mem_read() call, we must exit through the special		#
+# handler.								#
+#	The data register is determined and its value loaded to get the	#
+# string of FP registers affected. This value is used as an index into	#
+# a lookup table such that we can determine the number of bytes		#
+# involved.								#
+#	If the instruction is "fmovm.x <ea>,Dn", a _mem_read() is used	#
+# to read in all FP values. Again, _mem_read() may fail and require a	#
+# special exit.								#
+#	If the instruction is "fmovm.x Dn,<ea>", a _mem_write() is used	#
+# to write all FP values. _mem_write() may also fail.			#
+#	If the instruction is "fmovm.x Dn,-(a7)" from supervisor mode,	#
+# then we return the size of the dump and the string to the caller	#
+# so that the move can occur outside of this routine. This special	#
+# case is required so that moves to the system stack are handled	#
+# correctly.								#
+#									#
+# DYNAMIC:								#
+#	fmovm.x	dn, <ea>						#
+#	fmovm.x	<ea>, dn						#
+#									#
+#	      <WORD 1>		      <WORD2>				#
+#	1111 0010 00 |<ea>|	11@& 1000 0$$$ 0000			#
+#									#
+#	& = (0): predecrement addressing mode				#
+#	    (1): postincrement or control addressing mode		#
+#	@ = (0): move listed regs from memory to the FPU		#
+#	    (1): move listed regs from the FPU to memory		#
+#	$$$    : index of data register holding reg select mask		#
+#									#
+# NOTES:								#
+#	If the data register holds a zero, then the			#
+#	instruction is a nop.						#
+#									#
+#########################################################################
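+
+# Encoding example, derived from the format above (illustrative only):
+# "fmovm.x d1,-(sp)" assembles to 0xf227,0xe810; WORD1 = 1111 0010 00
+# 100111 (mode = -(An), reg = a7) and WORD2 = 1110 1000 0001 0000
+# (@ = 1: FPU to memory, & = 0: predecrement, $$$ = 001: d1).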
+
+	global		fmovm_dynamic
+fmovm_dynamic:
+
+# extract the data register in which the bit string resides...
+	mov.b		1+EXC_EXTWORD(%a6),%d1	# fetch extword
+	andi.w		&0x70,%d1		# extract reg bits
+	lsr.b		&0x4,%d1		# shift into lo bits
+
+# fetch the bit string into d0...
+	bsr.l		fetch_dreg		# fetch reg string
+
+	andi.l		&0x000000ff,%d0		# keep only lo byte
+
+	mov.l		%d0,-(%sp)		# save strg
+	mov.b		(tbl_fmovm_size.w,%pc,%d0),%d0
+	mov.l		%d0,-(%sp)		# save size
+	bsr.l		fmovm_calc_ea		# calculate <ea>
+	mov.l		(%sp)+,%d0		# restore size
+	mov.l		(%sp)+,%d1		# restore strg
+
+# if the bit string is a zero, then the operation is a no-op
+# but, make sure that we've calculated ea and advanced the opword pointer
+	beq.w		fmovm_data_done
+
+# separate move ins from move outs...
+	btst		&0x5,EXC_EXTWORD(%a6)	# is it a move in or out?
+	beq.w		fmovm_data_in		# it's a move in
+
+#############
+# MOVE OUT: #
+#############
+fmovm_data_out:
+	btst		&0x4,EXC_EXTWORD(%a6)	# control or predecrement?
+	bne.w		fmovm_out_ctrl		# control
+
+############################
+fmovm_out_predec:
+# for predecrement mode, the bit string is the opposite of both control
+# operations and postincrement mode. (bit7 = FP7 ... bit0 = FP0)
+# here, we convert it to be just like the others...
+	mov.b		(tbl_fmovm_convert.w,%pc,%d1.w*1),%d1
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor mode?
+	beq.b		fmovm_out_ctrl		# user
+
+fmovm_out_predec_s:
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
+	bne.b		fmovm_out_ctrl
+
+# the operation was, unfortunately, an "fmovm.x dn,-(sp)"
+# called from supervisor mode.
+# we're also passing "size" and "strg" back to the calling routine
+	rts
+
+############################
+fmovm_out_ctrl:
+	mov.l		%a0,%a1			# move <ea> to a1
+
+	sub.l		%d0,%sp			# subtract size of dump
+	lea		(%sp),%a0
+
+	tst.b		%d1			# should FP0 be moved?
+	bpl.b		fmovm_out_ctrl_fp1	# no
+
+	mov.l		0x0+EXC_FP0(%a6),(%a0)+	# yes
+	mov.l		0x4+EXC_FP0(%a6),(%a0)+
+	mov.l		0x8+EXC_FP0(%a6),(%a0)+
+
+fmovm_out_ctrl_fp1:
+	lsl.b		&0x1,%d1		# should FP1 be moved?
+	bpl.b		fmovm_out_ctrl_fp2	# no
+
+	mov.l		0x0+EXC_FP1(%a6),(%a0)+	# yes
+	mov.l		0x4+EXC_FP1(%a6),(%a0)+
+	mov.l		0x8+EXC_FP1(%a6),(%a0)+
+
+fmovm_out_ctrl_fp2:
+	lsl.b		&0x1,%d1		# should FP2 be moved?
+	bpl.b		fmovm_out_ctrl_fp3	# no
+
+	fmovm.x		&0x20,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp3:
+	lsl.b		&0x1,%d1		# should FP3 be moved?
+	bpl.b		fmovm_out_ctrl_fp4	# no
+
+	fmovm.x		&0x10,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp4:
+	lsl.b		&0x1,%d1		# should FP4 be moved?
+	bpl.b		fmovm_out_ctrl_fp5	# no
+
+	fmovm.x		&0x08,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp5:
+	lsl.b		&0x1,%d1		# should FP5 be moved?
+	bpl.b		fmovm_out_ctrl_fp6	# no
+
+	fmovm.x		&0x04,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp6:
+	lsl.b		&0x1,%d1		# should FP6 be moved?
+	bpl.b		fmovm_out_ctrl_fp7	# no
+
+	fmovm.x		&0x02,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_fp7:
+	lsl.b		&0x1,%d1		# should FP7 be moved?
+	bpl.b		fmovm_out_ctrl_done	# no
+
+	fmovm.x		&0x01,(%a0)		# yes
+	add.l		&0xc,%a0
+
+fmovm_out_ctrl_done:
+	mov.l		%a1,L_SCR1(%a6)
+
+	lea		(%sp),%a0		# pass: supervisor src
+	mov.l		%d0,-(%sp)		# save size
+	bsr.l		_dmem_write		# copy data to user mem
+
+	mov.l		(%sp)+,%d0
+	add.l		%d0,%sp			# clear fpreg data from stack
+
+	tst.l		%d1			# did dstore err?
+	bne.w		fmovm_out_err		# yes
+
+	rts
+
+############
+# MOVE IN: #
+############
+fmovm_data_in:
+	mov.l		%a0,L_SCR1(%a6)
+
+	sub.l		%d0,%sp			# make room for fpregs
+	lea		(%sp),%a1
+
+	mov.l		%d1,-(%sp)		# save bit string for later
+	mov.l		%d0,-(%sp)		# save # of bytes
+
+	bsr.l		_dmem_read		# copy data from user mem
+
+	mov.l		(%sp)+,%d0		# retrieve # of bytes
+
+	tst.l		%d1			# did dfetch fail?
+	bne.w		fmovm_in_err		# yes
+
+	mov.l		(%sp)+,%d1		# load bit string
+
+	lea		(%sp),%a0		# addr of stack
+
+	tst.b		%d1			# should FP0 be moved?
+	bpl.b		fmovm_data_in_fp1	# no
+
+	mov.l		(%a0)+,0x0+EXC_FP0(%a6)	# yes
+	mov.l		(%a0)+,0x4+EXC_FP0(%a6)
+	mov.l		(%a0)+,0x8+EXC_FP0(%a6)
+
+fmovm_data_in_fp1:
+	lsl.b		&0x1,%d1		# should FP1 be moved?
+	bpl.b		fmovm_data_in_fp2	# no
+
+	mov.l		(%a0)+,0x0+EXC_FP1(%a6)	# yes
+	mov.l		(%a0)+,0x4+EXC_FP1(%a6)
+	mov.l		(%a0)+,0x8+EXC_FP1(%a6)
+
+fmovm_data_in_fp2:
+	lsl.b		&0x1,%d1		# should FP2 be moved?
+	bpl.b		fmovm_data_in_fp3	# no
+
+	fmovm.x		(%a0)+,&0x20		# yes
+
+fmovm_data_in_fp3:
+	lsl.b		&0x1,%d1		# should FP3 be moved?
+	bpl.b		fmovm_data_in_fp4	# no
+
+	fmovm.x		(%a0)+,&0x10		# yes
+
+fmovm_data_in_fp4:
+	lsl.b		&0x1,%d1		# should FP4 be moved?
+	bpl.b		fmovm_data_in_fp5	# no
+
+	fmovm.x		(%a0)+,&0x08		# yes
+
+fmovm_data_in_fp5:
+	lsl.b		&0x1,%d1		# should FP5 be moved?
+	bpl.b		fmovm_data_in_fp6	# no
+
+	fmovm.x		(%a0)+,&0x04		# yes
+
+fmovm_data_in_fp6:
+	lsl.b		&0x1,%d1		# should FP6 be moved?
+	bpl.b		fmovm_data_in_fp7	# no
+
+	fmovm.x		(%a0)+,&0x02		# yes
+
+fmovm_data_in_fp7:
+	lsl.b		&0x1,%d1		# should FP7 be moved?
+	bpl.b		fmovm_data_in_done	# no
+
+	fmovm.x		(%a0)+,&0x01		# yes
+
+fmovm_data_in_done:
+	add.l		%d0,%sp			# remove fpregs from stack
+	rts
+
+#####################################
+
+fmovm_data_done:
+	rts
+
+##############################################################################
+
+#
+# table indexed by the operation's bit string that gives the number
+# of bytes that will be moved.
+#
+# number of bytes = (# of 1's in bit string) * 12(bytes/fpreg)
+#
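+# e.g. a bit string of 0x0b (%00001011) has three bits set, so
+# tbl_fmovm_size[0x0b] = 3 * 12 = 0x24 bytes.
+#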
+tbl_fmovm_size:
+	byte	0x00,0x0c,0x0c,0x18,0x0c,0x18,0x18,0x24
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+	byte	0x3c,0x48,0x48,0x54,0x48,0x54,0x54,0x60
+
+#
+# table to convert a pre-decrement bit string into a post-increment
+# or control bit string.
+# ex:	0x00	==>	0x00
+#	0x01	==>	0x80
+#	0x02	==>	0x40
+#		.
+#		.
+#	0xfd	==>	0xbf
+#	0xfe	==>	0x7f
+#	0xff	==>	0xff
+#
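+# in other words, each entry is its index with the bit order reversed
+# (bit0 <-> bit7, bit1 <-> bit6, bit2 <-> bit5, bit3 <-> bit4).
+#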
+tbl_fmovm_convert:
+	byte	0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0
+	byte	0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0
+	byte	0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8
+	byte	0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8
+	byte	0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4
+	byte	0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4
+	byte	0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec
+	byte	0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc
+	byte	0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2
+	byte	0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2
+	byte	0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea
+	byte	0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa
+	byte	0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6
+	byte	0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6
+	byte	0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee
+	byte	0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe
+	byte	0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1
+	byte	0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1
+	byte	0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9
+	byte	0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9
+	byte	0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5
+	byte	0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5
+	byte	0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed
+	byte	0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd
+	byte	0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3
+	byte	0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3
+	byte	0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb
+	byte	0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb
+	byte	0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7
+	byte	0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7
+	byte	0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef
+	byte	0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
+
+	global		fmovm_calc_ea
+###############################################
+# _fmovm_calc_ea: calculate effective address #
+###############################################
+fmovm_calc_ea:
+	mov.l		%d0,%a0			# move # bytes to a0
+
+# currently, MODE and REG are taken from the EXC_OPWORD. this could be
+# easily changed if they were inputs passed in registers.
+	mov.w		EXC_OPWORD(%a6),%d0	# fetch opcode word
+	mov.w		%d0,%d1			# make a copy
+
+	andi.w		&0x3f,%d0		# extract mode and reg fields
+	andi.l		&0x7,%d1		# extract reg  field
+
+# jump to the corresponding function for each {MODE,REG} pair.
+	mov.w		(tbl_fea_mode.b,%pc,%d0.w*2),%d0 # fetch jmp distance
+	jmp		(tbl_fea_mode.b,%pc,%d0.w*1) # jmp to correct ea mode
+
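+# e.g. opword mode/reg bits %010011, i.e. mode = (An) and reg = 3,
+# give an index of 19, which selects faddr_ind_a3 in the table below.
+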
+	swbeg		&64
+tbl_fea_mode:
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+
+	short		faddr_ind_a0	-	tbl_fea_mode
+	short		faddr_ind_a1	-	tbl_fea_mode
+	short		faddr_ind_a2	-	tbl_fea_mode
+	short		faddr_ind_a3	-	tbl_fea_mode
+	short		faddr_ind_a4	-	tbl_fea_mode
+	short		faddr_ind_a5	-	tbl_fea_mode
+	short		faddr_ind_a6	-	tbl_fea_mode
+	short		faddr_ind_a7	-	tbl_fea_mode
+
+	short		faddr_ind_p_a0	-	tbl_fea_mode
+	short		faddr_ind_p_a1	-	tbl_fea_mode
+	short		faddr_ind_p_a2	-	tbl_fea_mode
+	short		faddr_ind_p_a3	-	tbl_fea_mode
+	short		faddr_ind_p_a4	-	tbl_fea_mode
+	short		faddr_ind_p_a5	-	tbl_fea_mode
+	short		faddr_ind_p_a6	-	tbl_fea_mode
+	short		faddr_ind_p_a7	-	tbl_fea_mode
+
+	short		faddr_ind_m_a0	-	tbl_fea_mode
+	short		faddr_ind_m_a1	-	tbl_fea_mode
+	short		faddr_ind_m_a2	-	tbl_fea_mode
+	short		faddr_ind_m_a3	-	tbl_fea_mode
+	short		faddr_ind_m_a4	-	tbl_fea_mode
+	short		faddr_ind_m_a5	-	tbl_fea_mode
+	short		faddr_ind_m_a6	-	tbl_fea_mode
+	short		faddr_ind_m_a7	-	tbl_fea_mode
+
+	short		faddr_ind_disp_a0	-	tbl_fea_mode
+	short		faddr_ind_disp_a1	-	tbl_fea_mode
+	short		faddr_ind_disp_a2	-	tbl_fea_mode
+	short		faddr_ind_disp_a3	-	tbl_fea_mode
+	short		faddr_ind_disp_a4	-	tbl_fea_mode
+	short		faddr_ind_disp_a5	-	tbl_fea_mode
+	short		faddr_ind_disp_a6	-	tbl_fea_mode
+	short		faddr_ind_disp_a7	-	tbl_fea_mode
+
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+	short		faddr_ind_ext	-	tbl_fea_mode
+
+	short		fabs_short	-	tbl_fea_mode
+	short		fabs_long	-	tbl_fea_mode
+	short		fpc_ind		-	tbl_fea_mode
+	short		fpc_ind_ext	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+	short		tbl_fea_mode	-	tbl_fea_mode
+
+###################################
+# Address register indirect: (An) #
+###################################
+faddr_ind_a0:
+	mov.l		EXC_DREGS+0x8(%a6),%a0	# Get current a0
+	rts
+
+faddr_ind_a1:
+	mov.l		EXC_DREGS+0xc(%a6),%a0	# Get current a1
+	rts
+
+faddr_ind_a2:
+	mov.l		%a2,%a0			# Get current a2
+	rts
+
+faddr_ind_a3:
+	mov.l		%a3,%a0			# Get current a3
+	rts
+
+faddr_ind_a4:
+	mov.l		%a4,%a0			# Get current a4
+	rts
+
+faddr_ind_a5:
+	mov.l		%a5,%a0			# Get current a5
+	rts
+
+faddr_ind_a6:
+	mov.l		(%a6),%a0		# Get current a6
+	rts
+
+faddr_ind_a7:
+	mov.l		EXC_A7(%a6),%a0		# Get current a7
+	rts
+
+#####################################################
+# Address register indirect w/ postincrement: (An)+ #
+#####################################################
+faddr_ind_p_a0:
+	mov.l		EXC_DREGS+0x8(%a6),%d0	# Get current a0
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,EXC_DREGS+0x8(%a6)	# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a1:
+	mov.l		EXC_DREGS+0xc(%a6),%d0	# Get current a1
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,EXC_DREGS+0xc(%a6)	# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a2:
+	mov.l		%a2,%d0			# Get current a2
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a2			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a3:
+	mov.l		%a3,%d0			# Get current a3
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a3			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a4:
+	mov.l		%a4,%d0			# Get current a4
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a4			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a5:
+	mov.l		%a5,%d0			# Get current a5
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,%a5			# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a6:
+	mov.l		(%a6),%d0		# Get current a6
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,(%a6)		# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_p_a7:
+	mov.b		&mia7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+	mov.l		EXC_A7(%a6),%d0		# Get current a7
+	mov.l		%d0,%d1
+	add.l		%a0,%d1			# Increment
+	mov.l		%d1,EXC_A7(%a6)		# Save incr value
+	mov.l		%d0,%a0
+	rts
+
+####################################################
+# Address register indirect w/ predecrement: -(An) #
+####################################################
+faddr_ind_m_a0:
+	mov.l		EXC_DREGS+0x8(%a6),%d0	# Get current a0
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_DREGS+0x8(%a6)	# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a1:
+	mov.l		EXC_DREGS+0xc(%a6),%d0	# Get current a1
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_DREGS+0xc(%a6)	# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a2:
+	mov.l		%a2,%d0			# Get current a2
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a2			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a3:
+	mov.l		%a3,%d0			# Get current a3
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a3			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a4:
+	mov.l		%a4,%d0			# Get current a4
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a4			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a5:
+	mov.l		%a5,%d0			# Get current a5
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,%a5			# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a6:
+	mov.l		(%a6),%d0		# Get current a6
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,(%a6)		# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+faddr_ind_m_a7:
+	mov.b		&mda7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+	mov.l		EXC_A7(%a6),%d0		# Get current a7
+	sub.l		%a0,%d0			# Decrement
+	mov.l		%d0,EXC_A7(%a6)		# Save decr value
+	mov.l		%d0,%a0
+	rts
+
+########################################################
+# Address register indirect w/ displacement: (d16, An) #
+########################################################
+faddr_ind_disp_a0:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_DREGS+0x8(%a6),%a0	# a0 + d16
+	rts
+
+faddr_ind_disp_a1:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_DREGS+0xc(%a6),%a0	# a1 + d16
+	rts
+
+faddr_ind_disp_a2:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a2,%a0			# a2 + d16
+	rts
+
+faddr_ind_disp_a3:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a3,%a0			# a3 + d16
+	rts
+
+faddr_ind_disp_a4:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a4,%a0			# a4 + d16
+	rts
+
+faddr_ind_disp_a5:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		%a5,%a0			# a5 + d16
+	rts
+
+faddr_ind_disp_a6:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		(%a6),%a0		# a6 + d16
+	rts
+
+faddr_ind_disp_a7:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_A7(%a6),%a0		# a7 + d16
+	rts
+
+########################################################################
+# Address register indirect w/ index(8-bit displacement): (d8, An, Xn) #
+#    "       "         "    w/   "  (base displacement): (bd, An, Xn)  #
+# Memory indirect postindexed: ([bd, An], Xn, od)		       #
+# Memory indirect preindexed: ([bd, An, Xn], od)		       #
+########################################################################
+faddr_ind_ext:
+	addq.l		&0x8,%d1
+	bsr.l		fetch_dreg		# fetch base areg
+	mov.l		%d0,-(%sp)
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch extword in d0
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		(%sp)+,%a0
+
+	btst		&0x8,%d0		# is disp only 8 bits?
+	bne.w		fcalc_mem_ind		# no; calc memory indirect
+
+	mov.l		%d0,L_SCR1(%a6)		# hold opword
+
+	mov.l		%d0,%d1
+	rol.w		&0x4,%d1
+	andi.w		&0xf,%d1		# extract index regno
+
+# count on fetch_dreg() not to alter a0...
+	bsr.l		fetch_dreg		# fetch index
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		L_SCR1(%a6),%d2		# fetch opword
+
+	btst		&0xb,%d2		# is it word or long?
+	bne.b		faii8_long
+	ext.l		%d0			# sign extend word index
+faii8_long:
+	mov.l		%d2,%d1
+	rol.w		&0x7,%d1
+	andi.l		&0x3,%d1		# extract scale value
+
+	lsl.l		%d1,%d0			# shift index by scale
+
+	extb.l		%d2			# sign extend displacement
+	add.l		%d2,%d0			# index + disp
+	add.l		%d0,%a0			# An + (index + disp)
+
+	mov.l		(%sp)+,%d2		# restore old d2
+	rts
+
+###########################
+# Absolute short: (XXX).W #
+###########################
+fabs_short:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch short address
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# return <ea> in a0
+	rts
+
+##########################
+# Absolute long: (XXX).L #
+##########################
+fabs_long:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch long address
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,%a0			# return <ea> in a0
+	rts
+
+#######################################################
+# Program counter indirect w/ displacement: (d16, PC) #
+#######################################################
+fpc_ind:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch word displacement
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.w		%d0,%a0			# sign extend displacement
+
+	add.l		EXC_EXTWPTR(%a6),%a0	# pc + d16
+
+# _imem_read_word() increased the extwptr by 2. need to adjust here.
+	subq.l		&0x2,%a0		# adjust <ea>
+	rts
+
+##########################################################
+# PC indirect w/ index(8-bit displacement): (d8, PC, An) #
+# "     "     w/   "  (base displacement): (bd, PC, An)  #
+# PC memory indirect postindexed: ([bd, PC], Xn, od)     #
+# PC memory indirect preindexed: ([bd, PC, Xn], od)      #
+##########################################################
+fpc_ind_ext:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word		# fetch ext word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# put base in a0
+	subq.l		&0x2,%a0		# adjust base
+
+	btst		&0x8,%d0		# is disp only 8 bits?
+	bne.w		fcalc_mem_ind		# calc memory indirect
+
+	mov.l		%d0,L_SCR1(%a6)		# store opword
+
+	mov.l		%d0,%d1			# make extword copy
+	rol.w		&0x4,%d1		# rotate reg num into place
+	andi.w		&0xf,%d1		# extract register number
+
+# count on fetch_dreg() not to alter a0...
+	bsr.l		fetch_dreg		# fetch index
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		L_SCR1(%a6),%d2		# fetch opword
+
+	btst		&0xb,%d2		# is index word or long?
+	bne.b		fpii8_long		# long
+	ext.l		%d0			# sign extend word index
+fpii8_long:
+	mov.l		%d2,%d1
+	rol.w		&0x7,%d1		# rotate scale value into place
+	andi.l		&0x3,%d1		# extract scale value
+
+	lsl.l		%d1,%d0			# shift index by scale
+
+	extb.l		%d2			# sign extend displacement
+	add.l		%d2,%d0			# disp + index
+	add.l		%d0,%a0			# An + (index + disp)
+
+	mov.l		(%sp)+,%d2		# restore temp register
+	rts
+
+# d2 = index
+# d3 = base
+# d4 = od
+# d5 = extword
+fcalc_mem_ind:
+	btst		&0x6,%d0		# is the index suppressed?
+	beq.b		fcalc_index
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+
+	mov.l		%d0,%d5			# put extword in d5
+	mov.l		%a0,%d3			# put base in d3
+
+	clr.l		%d2			# yes, so index = 0
+	bra.b		fbase_supp_ck
+
+# index:
+fcalc_index:
+	mov.l		%d0,L_SCR1(%a6)		# save d0 (opword)
+	bfextu		%d0{&16:&4},%d1		# fetch dreg index
+	bsr.l		fetch_dreg
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+	mov.l		%d0,%d2			# put index in d2
+	mov.l		L_SCR1(%a6),%d5
+	mov.l		%a0,%d3
+
+	btst		&0xb,%d5		# is index word or long?
+	bne.b		fno_ext
+	ext.l		%d2
+
+fno_ext:
+	bfextu		%d5{&21:&2},%d0
+	lsl.l		%d0,%d2
+
+# base address (passed as parameter in d3):
+# we clear the value here if it should actually be suppressed.
+fbase_supp_ck:
+	btst		&0x7,%d5		# is the bd suppressed?
+	beq.b		fno_base_sup
+	clr.l		%d3
+
+# base displacement:
+fno_base_sup:
+	bfextu		%d5{&26:&2},%d0		# get bd size
+#	beq.l		fmovm_error		# if (size == 0) it's reserved
+
+	cmpi.b		%d0,&0x2
+	blt.b		fno_bd
+	beq.b		fget_word_bd
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	bra.b		fchk_ind
+
+fget_word_bd:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	ext.l		%d0			# sign extend bd
+
+fchk_ind:
+	add.l		%d0,%d3			# base += bd
+
+# outer displacement:
+fno_bd:
+	bfextu		%d5{&30:&2},%d0		# is od suppressed?
+	beq.w		faii_bd
+
+	cmpi.b		%d0,&0x2
+	blt.b		fnull_od
+	beq.b		fword_od
+
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	bra.b		fadd_them
+
+fword_od:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x2,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_word
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		fcea_iacc		# yes
+
+	ext.l		%d0			# sign extend od
+	bra.b		fadd_them
+
+fnull_od:
+	clr.l		%d0
+
+fadd_them:
+	mov.l		%d0,%d4
+
+	btst		&0x2,%d5		# pre or post indexing?
+	beq.b		fpre_indexed
+
+	mov.l		%d3,%a0
+	bsr.l		_dmem_read_long
+
+	tst.l		%d1			# did dfetch fail?
+	bne.w		fcea_err		# yes
+
+	add.l		%d2,%d0			# <ea> += index
+	add.l		%d4,%d0			# <ea> += od
+	bra.b		fdone_ea
+
+fpre_indexed:
+	add.l		%d2,%d3			# preindexing
+	mov.l		%d3,%a0
+	bsr.l		_dmem_read_long
+
+	tst.l		%d1			# did dfetch fail?
+	bne.w		fcea_err		# yes
+
+	add.l		%d4,%d0			# ea += od
+	bra.b		fdone_ea
+
+faii_bd:
+	add.l		%d2,%d3			# ea = (base + bd) + index
+	mov.l		%d3,%d0
+fdone_ea:
+	mov.l		%d0,%a0
+
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	rts
+
+#########################################################
+fcea_err:
+	mov.l		%d3,%a0
+
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	mov.w		&0x0101,%d0
+	bra.l		iea_dacc
+
+fcea_iacc:
+	movm.l		(%sp)+,&0x003c		# restore d2-d5
+	bra.l		iea_iacc
+
+fmovm_out_err:
+	bsr.l		restore
+	mov.w		&0x00e1,%d0
+	bra.b		fmovm_err
+
+fmovm_in_err:
+	bsr.l		restore
+	mov.w		&0x0161,%d0
+
+fmovm_err:
+	mov.l		L_SCR1(%a6),%a0
+	bra.l		iea_dacc
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fmovm_ctrl(): emulate fmovm.l of control registers instr	#
+#									#
+# XREF ****************************************************************	#
+#	_imem_read_long() - read longword from memory			#
+#	iea_iacc() - _imem_read_long() failed; error recovery		#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If _imem_read_long() doesn't fail:				#
+#		USER_FPCR(a6)  = new FPCR value				#
+#		USER_FPSR(a6)  = new FPSR value				#
+#		USER_FPIAR(a6) = new FPIAR value			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Decode the instruction type by looking at the extension word	#
+# in order to see how many control registers to fetch from memory.	#
+# Fetch them using _imem_read_long(). If this fetch fails, exit through	#
+# the special access error exit handler iea_iacc().			#
+#									#
+# Instruction word decoding:						#
+#									#
+#	fmovem.l #<data>, {FPIAR&|FPCR&|FPSR}				#
+#									#
+#		WORD1			WORD2				#
+#	1111 0010 00 111100	100$ $$00 0000 0000			#
+#									#
+#	$$$ (100): FPCR							#
+#	    (010): FPSR							#
+#	    (001): FPIAR						#
+#	    (000): FPIAR						#
+#									#
+#########################################################################
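+
+# For example (values from the decode below): an extension word whose
+# upper byte is 0x9c selects all three registers, so three longwords
+# are fetched from the instruction stream in the order FPCR, FPSR,
+# FPIAR; an upper byte of 0x98 selects FPCR/FPSR and fetches two.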
+
+	global		fmovm_ctrl
+fmovm_ctrl:
+	mov.b		EXC_EXTWORD(%a6),%d0	# fetch reg select bits
+	cmpi.b		%d0,&0x9c		# fpcr & fpsr & fpiar ?
+	beq.w		fctrl_in_7		# yes
+	cmpi.b		%d0,&0x98		# fpcr & fpsr ?
+	beq.w		fctrl_in_6		# yes
+	cmpi.b		%d0,&0x94		# fpcr & fpiar ?
+	beq.b		fctrl_in_5		# yes
+
+# fmovem.l #<data>, fpsr/fpiar
+fctrl_in_3:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPSR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to stack
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPIAR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to stack
+	rts
+
+# fmovem.l #<data>, fpcr/fpiar
+fctrl_in_5:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPCR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to stack
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPIAR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to stack
+	rts
+
+# fmovem.l #<data>, fpcr/fpsr
+fctrl_in_6:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPCR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to mem
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPSR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to mem
+	rts
+
+# fmovem.l #<data>, fpcr/fpsr/fpiar
+fctrl_in_7:
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPCR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPCR(%a6)	# store new FPCR to mem
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPSR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPSR(%a6)	# store new FPSR to mem
+	mov.l		EXC_EXTWPTR(%a6),%a0	# fetch instruction addr
+	addq.l		&0x4,EXC_EXTWPTR(%a6)	# incr instruction ptr
+	bsr.l		_imem_read_long		# fetch FPIAR from mem
+
+	tst.l		%d1			# did ifetch fail?
+	bne.l		iea_iacc		# yes
+
+	mov.l		%d0,USER_FPIAR(%a6)	# store new FPIAR to mem
+	rts
+
+##########################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	addsub_scaler2(): scale inputs to fadd/fsub such that no	#
+#			  OVFL/UNFL exceptions will result		#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize mantissa after adjusting exponent		#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SRC(a6) = fp op1(src)					#
+#	FP_DST(a6) = fp op2(dst)					#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SRC(a6) = fp op1 scaled(src)					#
+#	FP_DST(a6) = fp op2 scaled(dst)					#
+#	d0         = scale amount					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If the DST exponent is > the SRC exponent, set the DST exponent	#
+# equal to 0x3fff and scale the SRC exponent by the value that the	#
+# DST exponent was scaled by. If the SRC exponent is greater or equal,	#
+# do the opposite. Return this scale factor in d0.			#
+#	If the two exponents differ by > the number of mantissa bits	#
+# plus two, then set the smallest exponent to a very small value as a	#
+# quick shortcut.							#
+#									#
+#########################################################################
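+
+# Worked example (illustrative exponents): src exp = 0x3ff0 and
+# dst exp = 0x4010. Since dst > src, the dst exponent is forced to
+# 0x3fff, giving a scale factor of 0x3fff - 0x4010 = -0x11; the src
+# exponent is scaled by the same amount (0x3ff0 - 0x11 = 0x3fdf), so
+# the difference between the exponents (0x20) is preserved.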
+
+	global		addsub_scaler2
+addsub_scaler2:
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	mov.w		DST_EX(%a1),%d1
+	mov.w		%d0,FP_SCR0_EX(%a6)
+	mov.w		%d1,FP_SCR1_EX(%a6)
+
+	andi.w		&0x7fff,%d0
+	andi.w		&0x7fff,%d1
+	mov.w		%d0,L_SCR1(%a6)		# store src exponent
+	mov.w		%d1,2+L_SCR1(%a6)	# store dst exponent
+
+	cmp.w		%d0, %d1		# is src exp >= dst exp?
+	bge.l		src_exp_ge2
+
+# dst exp is >  src exp; scale dst to exp = 0x3fff
+dst_exp_gt2:
+	bsr.l		scale_to_zero_dst
+	mov.l		%d0,-(%sp)		# save scale factor
+
+	cmpi.b		STAG(%a6),&DENORM	# is src denormalized?
+	bne.b		cmpexp12
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the denorm; result is new exp
+	neg.w		%d0			# new exp = -(shft val)
+	mov.w		%d0,L_SCR1(%a6)		# insert new exp
+
+cmpexp12:
+	mov.w		2+L_SCR1(%a6),%d0
+	subi.w		&mantissalen+2,%d0	# subtract mantissalen+2 from larger exp
+
+	cmp.w		%d0,L_SCR1(%a6)		# is difference >= len(mantissa)+2?
+	bge.b		quick_scale12
+
+	mov.w		L_SCR1(%a6),%d0
+	add.w		0x2(%sp),%d0		# scale src exponent by scale factor
+	mov.w		FP_SCR0_EX(%a6),%d1
+	and.w		&0x8000,%d1
+	or.w		%d1,%d0			# concat {sgn,new exp}
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new dst exponent
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+quick_scale12:
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# zero src exponent
+	bset		&0x0,1+FP_SCR0_EX(%a6)	# set exp = 1
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+# src exp is >= dst exp; scale src to exp = 0x3fff
+src_exp_ge2:
+	bsr.l		scale_to_zero_src
+	mov.l		%d0,-(%sp)		# save scale factor
+
+	cmpi.b		DTAG(%a6),&DENORM	# is dst denormalized?
+	bne.b		cmpexp22
+	lea		FP_SCR1(%a6),%a0
+	bsr.l		norm			# normalize the denorm; result is new exp
+	neg.w		%d0			# new exp = -(shft val)
+	mov.w		%d0,2+L_SCR1(%a6)	# insert new exp
+
+cmpexp22:
+	mov.w		L_SCR1(%a6),%d0
+	subi.w		&mantissalen+2,%d0	# subtract mantissalen+2 from larger exp
+
+	cmp.w		%d0,2+L_SCR1(%a6)	# is difference >= len(mantissa)+2?
+	bge.b		quick_scale22
+
+	mov.w		2+L_SCR1(%a6),%d0
+	add.w		0x2(%sp),%d0		# scale dst exponent by scale factor
+	mov.w		FP_SCR1_EX(%a6),%d1
+	andi.w		&0x8000,%d1
+	or.w		%d1,%d0			# concat {sgn,new exp}
+	mov.w		%d0,FP_SCR1_EX(%a6)	# insert new dst exponent
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+quick_scale22:
+	andi.w		&0x8000,FP_SCR1_EX(%a6)	# zero dst exponent
+	bset		&0x0,1+FP_SCR1_EX(%a6)	# set exp = 1
+
+	mov.l		(%sp)+,%d0		# return SCALE factor
+	rts
+
+##########################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	scale_to_zero_src(): scale the exponent of extended precision	#
+#			     value at FP_SCR0(a6).			#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize the mantissa if the operand was a DENORM	#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SCR0(a6) = extended precision operand to be scaled		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR0(a6) = scaled extended precision operand			#
+#	d0	    = scale value					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Set the exponent of the input operand to 0x3fff. Save the value	#
+# of the difference between the original and new exponent. Then,	#
+# normalize the operand if it was a DENORM. Add this normalization	#
+# value to the previous value. Return the result.			#
+#									#
+#########################################################################
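+
+# e.g. (illustrative): an operand with biased exponent 0x4003 has its
+# exponent rewritten to 0x3fff and the scale value returned in d0 is
+# 0x3fff - 0x4003 = -0x4.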
+
+	global		scale_to_zero_src
+scale_to_zero_src:
+	mov.w		FP_SCR0_EX(%a6),%d1	# extract operand's {sgn,exp}
+	mov.w		%d1,%d0			# make a copy
+
+	andi.l		&0x7fff,%d1		# extract operand's exponent
+
+	andi.w		&0x8000,%d0		# extract operand's sgn
+	or.w		&0x3fff,%d0		# insert new operand's exponent(=0)
+
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert biased exponent
+
+	cmpi.b		STAG(%a6),&DENORM	# is operand a DENORM?
+	beq.b		stzs_denorm		# normalize the DENORM
+
+stzs_norm:
+	mov.l		&0x3fff,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+
+	rts
+
+stzs_denorm:
+	lea		FP_SCR0(%a6),%a0	# pass ptr to src op
+	bsr.l		norm			# normalize denorm
+	neg.l		%d0			# new exponent = -(shft val)
+	mov.l		%d0,%d1			# prepare for op_norm call
+	bra.b		stzs_norm		# finish scaling
+
+###
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	scale_sqrt(): scale the input operand exponent so a subsequent	#
+#		      fsqrt operation won't take an exception.		#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize the mantissa if the operand was a DENORM	#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SCR0(a6) = extended precision operand to be scaled		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR0(a6) = scaled extended precision operand			#
+#	d0	    = scale value					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If the input operand is a DENORM, normalize it.			#
+#	If the exponent of the input operand is even, set the exponent	#
+# to 0x3ffe and return a scale factor of "(exp-0x3ffe)/2". If the	#
+# exponent of the input operand is odd, set the exponent to 0x3fff and	#
+# return a scale factor of "(exp-0x3fff)/2".				#
+#									#
+#########################################################################
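+
+# Note: the new exponent below is chosen with the same parity as the
+# old one (0x3fff for odd, 0x3ffe for even) so that the exponent
+# difference is always even and halves to an integer scale factor.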
+
+	global		scale_sqrt
+scale_sqrt:
+	cmpi.b		STAG(%a6),&DENORM	# is operand a DENORM?
+	beq.b		ss_denorm		# normalize the DENORM
+
+	mov.w		FP_SCR0_EX(%a6),%d1	# extract operand's {sgn,exp}
+	andi.l		&0x7fff,%d1		# extract operand's exponent
+
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# extract operand's sgn
+
+	btst		&0x0,%d1		# is exp even or odd?
+	beq.b		ss_norm_even
+
+	ori.w		&0x3fff,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	mov.l		&0x3fff,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+ss_norm_even:
+	ori.w		&0x3ffe,FP_SCR0_EX(%a6)	# insert new operand's exponent(=-1)
+
+	mov.l		&0x3ffe,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+ss_denorm:
+	lea		FP_SCR0(%a6),%a0	# pass ptr to src op
+	bsr.l		norm			# normalize denorm
+
+	btst		&0x0,%d0		# is exp even or odd?
+	beq.b		ss_denorm_even
+
+	ori.w		&0x3fff,FP_SCR0_EX(%a6)	# insert new operand's exponent(=0)
+
+	add.l		&0x3fff,%d0
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+ss_denorm_even:
+	ori.w		&0x3ffe,FP_SCR0_EX(%a6)	# insert new operand's exponent(=-1)
+
+	add.l		&0x3ffe,%d0
+	asr.l		&0x1,%d0		# divide scale factor by 2
+	rts
+
+###
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	scale_to_zero_dst(): scale the exponent of extended precision	#
+#			     value at FP_SCR1(a6).			#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize the mantissa if the operand was a DENORM	#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SCR1(a6) = extended precision operand to be scaled		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR1(a6) = scaled extended precision operand			#
+#	d0	    = scale value					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Set the exponent of the input operand to 0x3fff. Save the value	#
+# of the difference between the original and new exponent. Then,	#
+# normalize the operand if it was a DENORM. Add this normalization	#
+# value to the previous value. Return the result.			#
+#									#
+#########################################################################
+
+	global		scale_to_zero_dst
+scale_to_zero_dst:
+	mov.w		FP_SCR1_EX(%a6),%d1	# extract operand's {sgn,exp}
+	mov.w		%d1,%d0			# make a copy
+
+	andi.l		&0x7fff,%d1		# extract operand's exponent
+
+	andi.w		&0x8000,%d0		# extract operand's sgn
+	or.w		&0x3fff,%d0		# insert new operand's exponent(=0)
+
+	mov.w		%d0,FP_SCR1_EX(%a6)	# insert biased exponent
+
+	cmpi.b		DTAG(%a6),&DENORM	# is operand a DENORM?
+	beq.b		stzd_denorm		# normalize the DENORM
+
+stzd_norm:
+	mov.l		&0x3fff,%d0
+	sub.l		%d1,%d0			# scale = BIAS + (-exp)
+	rts
+
+stzd_denorm:
+	lea		FP_SCR1(%a6),%a0	# pass ptr to dst op
+	bsr.l		norm			# normalize denorm
+	neg.l		%d0			# new exponent = -(shft val)
+	mov.l		%d0,%d1			# prepare for op_norm call
+	bra.b		stzd_norm		# finish scaling
+
+##########################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	res_qnan(): return default result w/ QNAN operand for dyadic	#
+#	res_snan(): return default result w/ SNAN operand for dyadic	#
+#	res_qnan_1op(): return dflt result w/ QNAN operand for monadic	#
+#	res_snan_1op(): return dflt result w/ SNAN operand for monadic	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	FP_SRC(a6) = pointer to extended precision src operand		#
+#	FP_DST(a6) = pointer to extended precision dst operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default result						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	If either operand (but not both operands) of an operation is a	#
+# nonsignalling NAN, then that NAN is returned as the result. If both	#
+# operands are nonsignalling NANs, then the destination operand		#
+# nonsignalling NAN is returned as the result.				#
+#	If either operand to an operation is a signalling NAN (SNAN),	#
+# then, the SNAN bit is set in the FPSR EXC byte. If the SNAN trap	#
+# enable bit is set in the FPCR, then the trap is taken and the		#
+# destination is not modified. If the SNAN trap enable bit is not set,	#
+# then the SNAN is converted to a nonsignalling NAN (by setting the	#
+# SNAN bit in the operand to one), and the operation continues as	#
+# described in the preceding paragraph, for nonsignalling NANs.		#
+#	Make sure the appropriate FPSR bits are set before exiting.	#
+#									#
+#########################################################################
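+
+# Note: "setting the SNAN bit" below means setting bit 6 of the byte at
+# FP_SRC_HI/FP_DST_HI, i.e. bit 62 of the 64-bit mantissa, which is the
+# quiet/signalling discriminator for extended precision NANs.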
+
+	global		res_qnan
+	global		res_snan
+res_qnan:
+res_snan:
+	cmp.b		DTAG(%a6), &SNAN	# is the dst an SNAN?
+	beq.b		dst_snan2
+	cmp.b		DTAG(%a6), &QNAN	# is the dst a  QNAN?
+	beq.b		dst_qnan2
+src_nan:
+	cmp.b		STAG(%a6), &QNAN
+	beq.b		src_qnan2
+	global		res_snan_1op
+res_snan_1op:
+src_snan2:
+	bset		&0x6, FP_SRC_HI(%a6)	# set SNAN bit
+	or.l		&nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
+	lea		FP_SRC(%a6), %a0
+	bra.b		nan_comp
+	global		res_qnan_1op
+res_qnan_1op:
+src_qnan2:
+	or.l		&nan_mask, USER_FPSR(%a6)
+	lea		FP_SRC(%a6), %a0
+	bra.b		nan_comp
+dst_snan2:
+	or.l		&nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
+	bset		&0x6, FP_DST_HI(%a6)	# set SNAN bit
+	lea		FP_DST(%a6), %a0
+	bra.b		nan_comp
+dst_qnan2:
+	lea		FP_DST(%a6), %a0
+	cmp.b		STAG(%a6), &SNAN
+	bne		nan_done
+	or.l		&aiop_mask+snan_mask, USER_FPSR(%a6)
+nan_done:
+	or.l		&nan_mask, USER_FPSR(%a6)
+nan_comp:
+	btst		&0x7, FTEMP_EX(%a0)	# is NAN neg?
+	beq.b		nan_not_neg
+	or.l		&neg_mask, USER_FPSR(%a6)
+nan_not_neg:
+	fmovm.x		(%a0), &0x80
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	res_operr(): return default result during operand error		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = default operand error result				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	A nonsignalling NAN is returned as the default result when	#
+# an operand error occurs for the following cases:			#
+#									#
+#	Multiply: (Infinity x Zero)					#
+#	Divide  : (Zero / Zero) || (Infinity / Infinity)		#
+#									#
+#########################################################################
+
+	global		res_operr
+res_operr:
+	or.l		&nan_mask+operr_mask+aiop_mask, USER_FPSR(%a6)
+	fmovm.x		nan_return(%pc), &0x80
+	rts
+
+nan_return:
+	long		0x7fff0000, 0xffffffff, 0xffffffff
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_denorm(): denormalize an intermediate result			#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT *************************************************************** #
+#	a0 = points to the operand to be denormalized			#
+#		(in the internal extended format)			#
+#									#
+#	d0 = rounding precision						#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = pointer to the denormalized result				#
+#		(in the internal extended format)			#
+#									#
+#	d0 = guard,round,sticky						#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the exponent underflow threshold for the given	#
+# precision, shift the mantissa bits to the right in order to raise	#
+# the exponent of the operand to the threshold value. While shifting	#
+# the mantissa bits right, maintain the value of the guard, round, and	#
+# sticky bits.								#
+# other notes:								#
+#	(1) _denorm() is called by the underflow routines		#
+#	(2) _denorm() does NOT affect the status register		#
+#									#
+#########################################################################
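+
+# Worked example (illustrative): with a threshold of 0x3fc7 and an
+# operand exponent of 0x3fc4, the difference is 3, so case_1 of
+# dnrm_lp() shifts the mantissa right by 3 and the bits shifted off
+# become the new guard, round, and sticky (anything beyond those three
+# folds into sticky). If the difference were > 65, every mantissa bit
+# would be shifted off and only the sticky bit needs to be computed.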
+
+#
+# table of exponent threshold values for each precision
+#
+tbl_thresh:
+	short		0x0
+	short		sgl_thresh
+	short		dbl_thresh
+
+	global		_denorm
+_denorm:
+#
+# Load the exponent threshold for the precision selected and check
+# to see if (threshold - exponent) is > 65, in which case we can
+# simply calculate the sticky bit and zero the mantissa. otherwise,
+# we have to call the denormalization routine.
+#
+	lsr.b		&0x2, %d0		# shift prec to lo bits
+	mov.w		(tbl_thresh.b,%pc,%d0.w*2), %d1 # load prec threshold
+	mov.w		%d1, %d0		# copy d1 into d0
+	sub.w		FTEMP_EX(%a0), %d0	# diff = threshold - exp
+	cmpi.w		%d0, &66		# is diff > 65? (mant + g,r bits)
+	bpl.b		denorm_set_stky		# yes; just calc sticky
+
+	clr.l		%d0			# clear g,r,s
+	btst		&inex2_bit, FPSR_EXCEPT(%a6) # was INEX2 set?
+	beq.b		denorm_call		# no; don't change anything
+	bset		&29, %d0		# yes; set sticky bit
+
+denorm_call:
+	bsr.l		dnrm_lp			# denormalize the number
+	rts
+
+#
+# all bits would have been shifted off during the denorm, so simply
+# calculate if the sticky should be set and clear the entire mantissa.
+#
+denorm_set_stky:
+	mov.l		&0x20000000, %d0	# set sticky bit in return value
+	mov.w		%d1, FTEMP_EX(%a0)	# load exp with threshold
+	clr.l		FTEMP_HI(%a0)		# zero the ms mantissa
+	clr.l		FTEMP_LO(%a0)		# zero the ls mantissa
+	rts
+
+#									#
+# dnrm_lp(): normalize exponent/mantissa to specified threshold	#
+#									#
+# INPUT:								#
+#	%a0	   : points to the operand to be denormalized		#
+#	%d0{31:29} : initial guard,round,sticky				#
+#	%d1{15:0}  : denormalization threshold				#
+# OUTPUT:								#
+#	%a0	   : points to the denormalized operand			#
+#	%d0{31:29} : final guard,round,sticky				#
+#									#
+
+# *** Local Equates *** #
+set	GRS,		L_SCR2			# g,r,s temp storage
+set	FTEMP_LO2,	L_SCR1			# FTEMP_LO copy
+
+	global		dnrm_lp
+dnrm_lp:
+
+#
+# make a copy of FTEMP_LO and place the g,r,s bits directly after it
+# in memory so as to make the bitfield extraction for denormalization easier.
+#
+	mov.l		FTEMP_LO(%a0), FTEMP_LO2(%a6) # make FTEMP_LO copy
+	mov.l		%d0, GRS(%a6)		# place g,r,s after it
+
+#
+# check to see how much less than the underflow threshold the operand
+# exponent is.
+#
+	mov.l		%d1, %d0		# copy the denorm threshold
+	sub.w		FTEMP_EX(%a0), %d1	# d1 = threshold - uns exponent
+	ble.b		dnrm_no_lp		# d1 <= 0
+	cmpi.w		%d1, &0x20		# is ( 0 <= d1 < 32) ?
+	blt.b		case_1			# yes
+	cmpi.w		%d1, &0x40		# is (32 <= d1 < 64) ?
+	blt.b		case_2			# yes
+	bra.w		case_3			# (d1 >= 64)
+
+#
+# No denormalization necessary
+#
+dnrm_no_lp:
+	mov.l		GRS(%a6), %d0		# restore original g,r,s
+	rts
+
+#
+# case (0<d1<32)
+#
+# %d0 = denorm threshold
+# %d1 = "n" = amt to shift
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
+#	\	   \		      \			 \
+#	 \	    \		       \		  \
+#	  \	     \			\		   \
+#	   \	      \			 \		    \
+#	    \	       \		  \		     \
+#	     \		\		   \		      \
+#	      \		 \		    \		       \
+#	       \	  \		     \			\
+#	<-(n)-><-(32 - n)-><------(32)-------><------(32)------->
+#	---------------------------------------------------------
+#	|0.....0| NEW_HI  |  NEW_FTEMP_LO     |grs		|
+#	---------------------------------------------------------
+#
+case_1:
+	mov.l		%d2, -(%sp)		# create temp storage
+
+	mov.w		%d0, FTEMP_EX(%a0)	# exponent = denorm threshold
+	mov.l		&32, %d0
+	sub.w		%d1, %d0		# %d0 = 32 - %d1
+
+	cmpi.w		%d1, &29		# is shft amt >= 29
+	blt.b		case1_extract		# no; no fix needed
+	mov.b		GRS(%a6), %d2
+	or.b		%d2, 3+FTEMP_LO2(%a6)
+
+case1_extract:
+	bfextu		FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_HI
+	bfextu		FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new FTEMP_LO
+	bfextu		FTEMP_LO2(%a6){%d0:&32}, %d0 # %d0 = new G,R,S
+
+	mov.l		%d2, FTEMP_HI(%a0)	# store new FTEMP_HI
+	mov.l		%d1, FTEMP_LO(%a0)	# store new FTEMP_LO
+
+	bftst		%d0{&2:&30}		# were bits shifted off?
+	beq.b		case1_sticky_clear	# no; go finish
+	bset		&rnd_stky_bit, %d0	# yes; set sticky bit
+
+case1_sticky_clear:
+	and.l		&0xe0000000, %d0	# clear all but G,R,S
+	mov.l		(%sp)+, %d2		# restore temp register
+	rts
+
+#
+# case (32<=d1<64)
+#
+# %d0 = denorm threshold
+# %d1 = "n" = amt to shift
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
+#	\	   \		      \
+#	 \	    \		       \
+#	  \	     \			-------------------
+#	   \	      --------------------		   \
+#	    -------------------		  \		    \
+#			       \	   \		     \
+#				\	    \		      \
+#				 \	     \		       \
+#	<-------(32)------><-(n)-><-(32 - n)-><------(32)------->
+#	---------------------------------------------------------
+#	|0...............0|0....0| NEW_LO     |grs		|
+#	---------------------------------------------------------
+#
+case_2:
+	mov.l		%d2, -(%sp)		# create temp storage
+
+	mov.w		%d0, FTEMP_EX(%a0)	# exponent = denorm threshold
+	subi.w		&0x20, %d1		# %d1 now between 0 and 32
+	mov.l		&0x20, %d0
+	sub.w		%d1, %d0		# %d0 = 32 - %d1
+
+# subtle step here; or in the g,r,s at the bottom of FTEMP_LO to minimize
+# the number of bits to check for the sticky detect.
+# it only plays a role in shift amounts of 61-63.
+	mov.b		GRS(%a6), %d2
+	or.b		%d2, 3+FTEMP_LO2(%a6)
+
+	bfextu		FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_LO
+	bfextu		FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new G,R,S
+
+	bftst		%d1{&2:&30}		# were any bits shifted off?
+	bne.b		case2_set_sticky	# yes; set sticky bit
+	bftst		FTEMP_LO2(%a6){%d0:&31}	# were any bits shifted off?
+	bne.b		case2_set_sticky	# yes; set sticky bit
+
+	mov.l		%d1, %d0		# move new G,R,S to %d0
+	bra.b		case2_end
+
+case2_set_sticky:
+	mov.l		%d1, %d0		# move new G,R,S to %d0
+	bset		&rnd_stky_bit, %d0	# set sticky bit
+
+case2_end:
+	clr.l		FTEMP_HI(%a0)		# store FTEMP_HI = 0
+	mov.l		%d2, FTEMP_LO(%a0)	# store FTEMP_LO
+	and.l		&0xe0000000, %d0	# clear all but G,R,S
+
+	mov.l		(%sp)+,%d2		# restore temp register
+	rts
+
+#
+# case (d1>=64)
+#
+# %d0 = denorm threshold
+# %d1 = amt to shift
+#
+case_3:
+	mov.w		%d0, FTEMP_EX(%a0)	# insert denorm threshold
+
+	cmpi.w		%d1, &65		# is shift amt > 65?
+	blt.b		case3_64		# no; it's == 64
+	beq.b		case3_65		# no; it's == 65
+
+#
+# case (d1>65)
+#
+# Shift value is > 65 and out of range. All bits are shifted off.
+# Return a zero mantissa with the sticky bit set
+#
+	clr.l		FTEMP_HI(%a0)		# clear hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	mov.l		&0x20000000, %d0	# set sticky bit
+	rts
+
+#
+# case (d1 == 64)
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-------(32)------>
+#	\		   \
+#	 \		    \
+#	  \		     \
+#	   \		      ------------------------------
+#	    -------------------------------		    \
+#					   \		     \
+#					    \		      \
+#					     \		       \
+#					      <-------(32)------>
+#	---------------------------------------------------------
+#	|0...............0|0................0|grs		|
+#	---------------------------------------------------------
+#
+case3_64:
+	mov.l		FTEMP_HI(%a0), %d0	# fetch hi(mantissa)
+	mov.l		%d0, %d1		# make a copy
+	and.l		&0xc0000000, %d0	# extract G,R
+	and.l		&0x3fffffff, %d1	# extract other bits
+
+	bra.b		case3_complete
+
+#
+# case (d1 == 65)
+#
+#	---------------------------------------------------------
+#	|     FTEMP_HI	  |	FTEMP_LO     |grs000.........000|
+#	---------------------------------------------------------
+#	<-------(32)------>
+#	\		   \
+#	 \		    \
+#	  \		     \
+#	   \		      ------------------------------
+#	    --------------------------------		    \
+#					    \		     \
+#					     \		      \
+#					      \		       \
+#					       <-------(31)----->
+#	---------------------------------------------------------
+#	|0...............0|0................0|0rs		|
+#	---------------------------------------------------------
+#
+case3_65:
+	mov.l		FTEMP_HI(%a0), %d0	# fetch hi(mantissa)
+	mov.l		%d0, %d1		# make a copy
+	and.l		&0x80000000, %d0	# extract R bit
+	lsr.l		&0x1, %d0		# shift high bit into R bit
+	and.l		&0x7fffffff, %d1	# extract other bits
+
+case3_complete:
+# the last operation done was an "and" of the bits shifted off, so the
+# condition codes are already set; branch accordingly.
+	bne.b		case3_set_sticky	# yes; go set new sticky
+	tst.l		FTEMP_LO(%a0)		# were any bits shifted off?
+	bne.b		case3_set_sticky	# yes; go set new sticky
+	tst.b		GRS(%a6)		# were any bits shifted off?
+	bne.b		case3_set_sticky	# yes; go set new sticky
+
+#
+# no bits were shifted off, so don't set the sticky bit;
+# the guard and round bits in %d0 are already correct.
+# the entire mantissa is zero.
+#
+	clr.l		FTEMP_HI(%a0)		# clear hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	rts
+
+#
+# some bits were shifted off so set the sticky bit.
+# the entire mantissa is zero.
+#
+case3_set_sticky:
+	bset		&rnd_stky_bit,%d0	# set new sticky bit
+	clr.l		FTEMP_HI(%a0)		# clear hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	_round(): round result according to precision/mode		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0	  = ptr to input operand in internal extended format	#
+#	d1(hi)    = contains rounding precision:			#
+#			ext = $0000xxxx					#
+#			sgl = $0004xxxx					#
+#			dbl = $0008xxxx					#
+#	d1(lo)	  = contains rounding mode:				#
+#			RN  = $xxxx0000					#
+#			RZ  = $xxxx0001					#
+#			RM  = $xxxx0002					#
+#			RP  = $xxxx0003					#
+#	d0{31:29} = contains the g,r,s bits (extended)			#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = pointer to rounded result					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	On return the value pointed to by a0 is correctly rounded,	#
+#	a0 is preserved and the g-r-s bits in d0 are cleared.		#
+#	The result is not typed - the tag field is invalid.  The	#
+#	result is still in the internal extended format.		#
+#									#
+#	The INEX bit of USER_FPSR will be set if the rounded result was	#
+#	inexact (i.e. if any of the g-r-s bits were set).		#
+#									#
+#########################################################################
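+#
+# For illustration only (not part of the Motorola package): a hedged C
+# sketch of the round-up decision implemented below, given the l(sb) and
+# g,r,s bits plus the sign (mode encoding per the INPUT section above):
+#
+#	/* returns nonzero if the mantissa lsb should be incremented */
+#	static int round_up(int mode, int sign,
+#			    unsigned l, unsigned g, unsigned r, unsigned s)
+#	{
+#		switch (mode) {
+#		case 0: return g && (r || s || l);	/* RN: ties to even */
+#		case 1: return 0;			/* RZ: truncate     */
+#		case 2: return sign && (g || r || s);	/* RM: toward -inf  */
+#		case 3: return !sign && (g || r || s);	/* RP: toward +inf  */
+#		}
+#		return 0;
+#	}
+#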
+
+	global		_round
+_round:
+#
+# ext_grs() looks at the rounding precision and sets the appropriate
+# G,R,S bits.
+# If (G,R,S == 0) then result is exact and round is done, else set
+# the inex flag in status reg and continue.
+#
+	bsr.l		ext_grs			# extract G,R,S
+
+	tst.l		%d0			# are G,R,S zero?
+	beq.w		truncate		# yes; round is complete
+
+	or.w		&inx2a_mask, 2+USER_FPSR(%a6) # set inex2/ainex
+
+#
+# Use rounding mode as an index into a jump table for these modes.
+# All of the following assumes grs != 0.
+#
+	mov.w		(tbl_mode.b,%pc,%d1.w*2), %a1 # load jump offset
+	jmp		(tbl_mode.b,%pc,%a1)	# jmp to rnd mode handler
+
+tbl_mode:
+	short		rnd_near - tbl_mode
+	short		truncate - tbl_mode	# RZ always truncates
+	short		rnd_mnus - tbl_mode
+	short		rnd_plus - tbl_mode
+
+#################################################################
+#	ROUND PLUS INFINITY					#
+#								#
+#	If sign of fp number = 0 (positive), then add 1 to l.	#
+#################################################################
+rnd_plus:
+	tst.b		FTEMP_SGN(%a0)		# check for sign
+	bmi.w		truncate		# if negative then truncate
+
+	mov.l		&0xffffffff, %d0	# force g,r,s to be all f's
+	swap		%d1			# set up d1 for round prec.
+
+	cmpi.b		%d1, &s_mode		# is prec = sgl?
+	beq.w		add_sgl			# yes
+	bgt.w		add_dbl			# no; it's dbl
+	bra.w		add_ext			# no; it's ext
+
+#################################################################
+#	ROUND MINUS INFINITY					#
+#								#
+#	If sign of fp number = 1 (negative), then add 1 to l.	#
+#################################################################
+rnd_mnus:
+	tst.b		FTEMP_SGN(%a0)		# check for sign
+	bpl.w		truncate		# if positive then truncate
+
+	mov.l		&0xffffffff, %d0	# force g,r,s to be all f's
+	swap		%d1			# set up d1 for round prec.
+
+	cmpi.b		%d1, &s_mode		# is prec = sgl?
+	beq.w		add_sgl			# yes
+	bgt.w		add_dbl			# no; it's dbl
+	bra.w		add_ext			# no; it's ext
+
+#################################################################
+#	ROUND NEAREST						#
+#								#
+#	If (g=1), then add 1 to l and if (r=s=0), then clear l	#
+#	Note that this will round to even in case of a tie.	#
+#################################################################
+rnd_near:
+	asl.l		&0x1, %d0		# shift g-bit to c-bit
+	bcc.w		truncate		# if (g=0) then truncate
+
+	swap		%d1			# set up d1 for round prec.
+
+	cmpi.b		%d1, &s_mode		# is prec = sgl?
+	beq.w		add_sgl			# yes
+	bgt.w		add_dbl			# no; it's dbl
+	bra.w		add_ext			# no; it's ext
+
+# *** LOCAL EQUATES ***
+set	ad_1_sgl,	0x00000100	# constant to add 1 to l-bit in sgl prec
+set	ad_1_dbl,	0x00000800	# constant to add 1 to l-bit in dbl prec
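+# (ad_1_sgl adds 1 at bit 8 of FTEMP_HI because single precision keeps
+# its 24 mantissa bits in FTEMP_HI{31:8}; ad_1_dbl adds 1 at bit 11 of
+# FTEMP_LO because double precision keeps its 53 bits down to FTEMP_LO{11}.)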
+
+#########################
+#	ADD SINGLE	#
+#########################
+add_sgl:
+	add.l		&ad_1_sgl, FTEMP_HI(%a0)
+	bcc.b		scc_clr			# no mantissa overflow
+	roxr.w		FTEMP_HI(%a0)		# shift v-bit back in
+	roxr.w		FTEMP_HI+2(%a0)		# shift v-bit back in
+	add.w		&0x1, FTEMP_EX(%a0)	# and incr exponent
+scc_clr:
+	tst.l		%d0			# test for rs = 0
+	bne.b		sgl_done
+	and.w		&0xfe00, FTEMP_HI+2(%a0) # clear the l-bit
+sgl_done:
+	and.l		&0xffffff00, FTEMP_HI(%a0) # truncate bits beyond sgl limit
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+	rts
+
+#########################
+#	ADD EXTENDED	#
+#########################
+add_ext:
+	addq.l		&1,FTEMP_LO(%a0)	# add 1 to l-bit
+	bcc.b		xcc_clr			# test for carry out
+	addq.l		&1,FTEMP_HI(%a0)	# propagate carry
+	bcc.b		xcc_clr
+	roxr.w		FTEMP_HI(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_HI+2(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_LO(%a0)
+	roxr.w		FTEMP_LO+2(%a0)
+	add.w		&0x1,FTEMP_EX(%a0)	# and inc exp
+xcc_clr:
+	tst.l		%d0			# test rs = 0
+	bne.b		add_ext_done
+	and.b		&0xfe,FTEMP_LO+3(%a0)	# clear the l bit
+add_ext_done:
+	rts
+
+#########################
+#	ADD DOUBLE	#
+#########################
+add_dbl:
+	add.l		&ad_1_dbl, FTEMP_LO(%a0) # add 1 to lsb
+	bcc.b		dcc_clr			# no carry
+	addq.l		&0x1, FTEMP_HI(%a0)	# propagate carry
+	bcc.b		dcc_clr			# no carry
+
+	roxr.w		FTEMP_HI(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_HI+2(%a0)		# mant is 0 so restore v-bit
+	roxr.w		FTEMP_LO(%a0)
+	roxr.w		FTEMP_LO+2(%a0)
+	addq.w		&0x1, FTEMP_EX(%a0)	# incr exponent
+dcc_clr:
+	tst.l		%d0			# test for rs = 0
+	bne.b		dbl_done
+	and.w		&0xf000, FTEMP_LO+2(%a0) # clear the l-bit
+
+dbl_done:
+	and.l		&0xfffff800,FTEMP_LO(%a0) # truncate bits beyond dbl limit
+	rts
+
+###########################
+# Truncate all other bits #
+###########################
+truncate:
+	swap		%d1			# select rnd prec
+
+	cmpi.b		%d1, &s_mode		# is prec sgl?
+	beq.w		sgl_done		# yes
+	bgt.b		dbl_done		# no; it's dbl
+	rts					# no; it's ext
+
+
+#
+# ext_grs(): extract guard, round and sticky bits according to
+#	     rounding precision.
+#
+# INPUT
+#	d0	   = extended precision g,r,s (in d0{31:29})
+#	d1	   = {PREC,ROUND}
+# OUTPUT
+#	d0{31:29}  = guard, round, sticky
+#
+# ext_grs() extracts the guard/round/sticky bits according to the
+# selected rounding precision. It is called only by the _round()
+# subroutine. All registers except d0 are kept intact; d0 returns the
+# updated guard,round,sticky in d0{31:29}.
+#
+# Notes: ext_grs() keys off the round PREC, and therefore has to swap d1
+#	 prior to use and restore d1 to its original value afterward. this
+#	 routine is tightly tied to _round() and not meant to uphold
+#	 standard subroutine calling practices.
+#
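+#
+# For illustration only (not part of the Motorola package): a hedged C
+# sketch of the sgl/dbl extractions below, with <stdint.h> and hi/lo =
+# FTEMP_HI/FTEMP_LO, old_grs = incoming d0 (names hypothetical):
+#
+#	/* sgl: mantissa = hi{31:8}; g,r = hi{7:6}; sticky = OR of
+#	   hi{5:0}, all of lo, and the incoming g,r,s */
+#	static unsigned ext_grs_sgl(uint32_t hi, uint32_t lo, unsigned old_grs)
+#	{
+#		return ((hi >> 7) & 1) << 2 | ((hi >> 6) & 1) << 1 |
+#		       (((hi & 0x3f) | lo | old_grs) != 0);
+#	}
+#
+#	/* dbl: mantissa ends at lo{11}; g,r = lo{10:9}; sticky = OR of
+#	   lo{8:0} and the incoming g,r,s */
+#	static unsigned ext_grs_dbl(uint32_t lo, unsigned old_grs)
+#	{
+#		return ((lo >> 10) & 1) << 2 | ((lo >> 9) & 1) << 1 |
+#		       (((lo & 0x1ff) | old_grs) != 0);
+#	}
+#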
+
+ext_grs:
+	swap		%d1			# have d1.w point to round precision
+	tst.b		%d1			# is rnd prec = extended?
+	bne.b		ext_grs_not_ext		# no; go handle sgl or dbl
+
+#
+# %d0 actually already holds g,r,s since _round() had it before calling
+# this function. so, as long as we don't disturb it, we are "returning" it.
+#
+ext_grs_ext:
+	swap		%d1			# yes; return to correct positions
+	rts
+
+ext_grs_not_ext:
+	movm.l		&0x3000, -(%sp)		# make some temp registers {d2/d3}
+
+	cmpi.b		%d1, &s_mode		# is rnd prec = sgl?
+	bne.b		ext_grs_dbl		# no; go handle dbl
+
+#
+# sgl:
+#	96		64	  40	32		0
+#	-----------------------------------------------------
+#	| EXP	|XXXXXXX|	  |xx	|		|grs|
+#	-----------------------------------------------------
+#			<--(24)--->nn\			   /
+#				   ee ---------------------
+#				   ww		|
+#						v
+#				   gr	   new sticky
+#
+ext_grs_sgl:
+	bfextu		FTEMP_HI(%a0){&24:&2}, %d3 # sgl prec. g-r are 2 bits right
+	mov.l		&30, %d2		# of the sgl prec. limits
+	lsl.l		%d2, %d3		# shift g-r bits to MSB of d3
+	mov.l		FTEMP_HI(%a0), %d2	# get word 2 for s-bit test
+	and.l		&0x0000003f, %d2	# s bit is the or of all other
+	bne.b		ext_grs_st_stky		# bits to the right of g-r
+	tst.l		FTEMP_LO(%a0)		# test lower mantissa
+	bne.b		ext_grs_st_stky		# if any are set, set sticky
+	tst.l		%d0			# test original g,r,s
+	bne.b		ext_grs_st_stky		# if any are set, set sticky
+	bra.b		ext_grs_end_sd		# if words 3 and 4 are clr, exit
+
+#
+# dbl:
+#	96		64		32	 11	0
+#	-----------------------------------------------------
+#	| EXP	|XXXXXXX|		|	 |xx	|grs|
+#	-----------------------------------------------------
+#						  nn\	    /
+#						  ee -------
+#						  ww	|
+#							v
+#						  gr	new sticky
+#
+ext_grs_dbl:
+	bfextu		FTEMP_LO(%a0){&21:&2}, %d3 # dbl-prec. g-r are 2 bits right
+	mov.l		&30, %d2		# of the dbl prec. limits
+	lsl.l		%d2, %d3		# shift g-r bits to the MSB of d3
+	mov.l		FTEMP_LO(%a0), %d2	# get lower mantissa  for s-bit test
+	and.l		&0x000001ff, %d2	# s bit is the or-ing of all
+	bne.b		ext_grs_st_stky		# other bits to the right of g-r
+	tst.l		%d0			# test original g,r,s
+	bne.b		ext_grs_st_stky		# if any are set, set sticky
+	bra.b		ext_grs_end_sd		# if clear, exit
+
+ext_grs_st_stky:
+	bset		&rnd_stky_bit, %d3	# set sticky bit
+ext_grs_end_sd:
+	mov.l		%d3, %d0		# return grs to d0
+
+	movm.l		(%sp)+, &0xc		# restore scratch registers {d2/d3}
+
+	swap		%d1			# restore d1 to original
+	rts
+
+#########################################################################
+# norm(): normalize the mantissa of an extended precision input. the	#
+#	  input operand must not already be normalized.			#
+#									#
+# XDEF ****************************************************************	#
+#	norm()								#
+#									#
+# XREF **************************************************************** #
+#	none								#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer fp extended precision operand to normalize		#
+#									#
+# OUTPUT ************************************************************** #
+#	d0 = number of bit positions the mantissa was shifted		#
+#	a0 = the input operand's mantissa is normalized; the exponent	#
+#	     is unchanged.						#
+#									#
+#########################################################################
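+#
+# For illustration only (not part of the Motorola package): a hedged C
+# sketch of norm() using a count-leading-zeros builtin in place of
+# bfffo (GCC-style builtin; man = FTEMP_HI:FTEMP_LO combined):
+#
+#	#include <stdint.h>
+#
+#	static int norm64(uint64_t *man)
+#	{
+#		if (*man == 0)
+#			return 64;	/* degenerate; input should be nonzero */
+#		int shift = __builtin_clzll(*man);
+#		*man <<= shift;
+#		return shift;	/* the exponent is left unchanged */
+#	}
+#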
+	global		norm
+norm:
+	mov.l		%d2, -(%sp)		# create some temp regs
+	mov.l		%d3, -(%sp)
+
+	mov.l		FTEMP_HI(%a0), %d0	# load hi(mantissa)
+	mov.l		FTEMP_LO(%a0), %d1	# load lo(mantissa)
+
+	bfffo		%d0{&0:&32}, %d2	# how many places to shift?
+	beq.b		norm_lo			# hi(man) is all zeroes!
+
+norm_hi:
+	lsl.l		%d2, %d0		# left shift hi(man)
+	bfextu		%d1{&0:%d2}, %d3	# extract lo bits
+
+	or.l		%d3, %d0		# create hi(man)
+	lsl.l		%d2, %d1		# create lo(man)
+
+	mov.l		%d0, FTEMP_HI(%a0)	# store new hi(man)
+	mov.l		%d1, FTEMP_LO(%a0)	# store new lo(man)
+
+	mov.l		%d2, %d0		# return shift amount
+
+	mov.l		(%sp)+, %d3		# restore temp regs
+	mov.l		(%sp)+, %d2
+
+	rts
+
+norm_lo:
+	bfffo		%d1{&0:&32}, %d2	# how many places to shift?
+	lsl.l		%d2, %d1		# shift lo(man)
+	add.l		&32, %d2		# add 32 to shft amount
+
+	mov.l		%d1, FTEMP_HI(%a0)	# store hi(man)
+	clr.l		FTEMP_LO(%a0)		# lo(man) is now zero
+
+	mov.l		%d2, %d0		# return shift amount
+
+	mov.l		(%sp)+, %d3		# restore temp regs
+	mov.l		(%sp)+, %d2
+
+	rts
+
+#########################################################################
+# unnorm_fix(): - changes an UNNORM to one of NORM, DENORM, or ZERO	#
+#		- returns corresponding optype tag			#
+#									#
+# XDEF ****************************************************************	#
+#	unnorm_fix()							#
+#									#
+# XREF **************************************************************** #
+#	norm() - normalize the mantissa					#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to unnormalized extended precision number		#
+#									#
+# OUTPUT ************************************************************** #
+#	d0 = optype tag - is corrected to one of NORM, DENORM, or ZERO	#
+#	a0 = input operand has been converted to a norm, denorm, or	#
+#	     zero; both the exponent and mantissa are changed.		#
+#									#
+#########################################################################
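+#
+# For illustration only (not part of the Motorola package): a hedged C
+# sketch of the decision made below, with "exp" = biased exponent (sign
+# stripped), "man" = 64-bit mantissa, clz64 as in the norm() sketch:
+#
+#	int shift = clz64(man);			/* the bfffo analogue */
+#	if (man == 0) {
+#		exp = 0;		tag = ZERO;
+#	} else if (shift > exp) {
+#		man <<= exp; exp = 0;	tag = DENORM;	/* stop at exp = 0 */
+#	} else {
+#		man <<= shift; exp -= shift; tag = NORM;
+#	}
+#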
+
+	global		unnorm_fix
+unnorm_fix:
+	bfffo		FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
+	bne.b		unnorm_shift		# hi(man) is not all zeroes
+
+#
+# hi(man) is all zeroes so see if any bits in lo(man) are set
+#
+unnorm_chk_lo:
+	bfffo		FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
+	beq.w		unnorm_zero		# yes
+
+	add.w		&32, %d0		# no; fix shift distance
+
+#
+# d0 = # shifts needed for complete normalization
+#
+unnorm_shift:
+	clr.l		%d1			# clear top word
+	mov.w		FTEMP_EX(%a0), %d1	# extract exponent
+	and.w		&0x7fff, %d1		# strip off sgn
+
+	cmp.w		%d0, %d1		# will denorm push exp < 0?
+	bgt.b		unnorm_nrm_zero		# yes; denorm only until exp = 0
+
+#
+# exponent would not go < 0. therefore, number stays normalized
+#
+	sub.w		%d0, %d1		# shift exponent value
+	mov.w		FTEMP_EX(%a0), %d0	# load old exponent
+	and.w		&0x8000, %d0		# save old sign
+	or.w		%d0, %d1		# {sgn,new exp}
+	mov.w		%d1, FTEMP_EX(%a0)	# insert new exponent
+
+	bsr.l		norm			# normalize UNNORM
+
+	mov.b		&NORM, %d0		# return new optype tag
+	rts
+
+#
+# exponent would go < 0, so only denormalize until exp = 0
+#
+unnorm_nrm_zero:
+	cmp.b		%d1, &32		# is exp <= 32?
+	bgt.b		unnorm_nrm_zero_lrg	# no; go handle large exponent
+
+	bfextu		FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
+	mov.l		%d0, FTEMP_HI(%a0)	# save new hi(man)
+
+	mov.l		FTEMP_LO(%a0), %d0	# fetch old lo(man)
+	lsl.l		%d1, %d0		# extract new lo(man)
+	mov.l		%d0, FTEMP_LO(%a0)	# save new lo(man)
+
+	and.w		&0x8000, FTEMP_EX(%a0)	# set exp = 0
+
+	mov.b		&DENORM, %d0		# return new optype tag
+	rts
+
+#
+# only mantissa bits set are in lo(man)
+#
+unnorm_nrm_zero_lrg:
+	sub.w		&32, %d1		# adjust shft amt by 32
+
+	mov.l		FTEMP_LO(%a0), %d0	# fetch old lo(man)
+	lsl.l		%d1, %d0		# left shift lo(man)
+
+	mov.l		%d0, FTEMP_HI(%a0)	# store new hi(man)
+	clr.l		FTEMP_LO(%a0)		# lo(man) = 0
+
+	and.w		&0x8000, FTEMP_EX(%a0)	# set exp = 0
+
+	mov.b		&DENORM, %d0		# return new optype tag
+	rts
+
+#
+# whole mantissa is zero so this UNNORM is actually a zero
+#
+unnorm_zero:
+	and.w		&0x8000, FTEMP_EX(%a0)	# force exponent to zero
+
+	mov.b		&ZERO, %d0		# fix optype tag
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	set_tag_x(): return the optype of the input ext fp number	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, UNNORM, ZERO	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#	If it's an unnormalized zero, alter the operand and force it	#
+# to be a normal zero.							#
+#									#
+#########################################################################
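+#
+# For illustration only (not part of the Motorola package): a hedged C
+# sketch of the classification below, with ex/hi/lo = the three words of
+# the operand, the j-bit = hi bit 31, and the package's tag constants:
+#
+#	unsigned exp = ex & 0x7fff;			/* strip sign */
+#	if (exp == 0x7fff)
+#		tag = ((hi & 0x7fffffff) | lo) == 0 ? INF
+#		    : (hi & 0x40000000) ? QNAN : SNAN;
+#	else if (hi & 0x80000000)
+#		tag = NORM;				/* j-bit set */
+#	else if ((hi | lo) == 0)
+#		tag = ZERO;		/* incl. "unnormalized zeroes" */
+#	else
+#		tag = exp ? UNNORM : DENORM;
+#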
+
+	global		set_tag_x
+set_tag_x:
+	mov.w		FTEMP_EX(%a0), %d0	# extract exponent
+	andi.w		&0x7fff, %d0		# strip off sign
+	cmpi.w		%d0, &0x7fff		# is (EXP == MAX)?
+	beq.b		inf_or_nan_x
+not_inf_or_nan_x:
+	btst		&0x7,FTEMP_HI(%a0)
+	beq.b		not_norm_x
+is_norm_x:
+	mov.b		&NORM, %d0
+	rts
+not_norm_x:
+	tst.w		%d0			# is exponent = 0?
+	bne.b		is_unnorm_x
+not_unnorm_x:
+	tst.l		FTEMP_HI(%a0)
+	bne.b		is_denorm_x
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_denorm_x
+is_zero_x:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_x:
+	mov.b		&DENORM, %d0
+	rts
+# we must now distinguish "unnormalized zeroes", which we
+# must convert to actual zeroes.
+is_unnorm_x:
+	tst.l		FTEMP_HI(%a0)
+	bne.b		is_unnorm_reg_x
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_unnorm_reg_x
+# it's an "unnormalized zero". let's convert it to an actual zero...
+	andi.w		&0x8000,FTEMP_EX(%a0)	# clear exponent
+	mov.b		&ZERO, %d0
+	rts
+is_unnorm_reg_x:
+	mov.b		&UNNORM, %d0
+	rts
+inf_or_nan_x:
+	tst.l		FTEMP_LO(%a0)
+	bne.b		is_nan_x
+	mov.l		FTEMP_HI(%a0), %d0
+	and.l		&0x7fffffff, %d0	# msb is a don't care!
+	bne.b		is_nan_x
+is_inf_x:
+	mov.b		&INF, %d0
+	rts
+is_nan_x:
+	btst		&0x6, FTEMP_HI(%a0)
+	beq.b		is_snan_x
+	mov.b		&QNAN, %d0
+	rts
+is_snan_x:
+	mov.b		&SNAN, %d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	set_tag_d(): return the optype of the input dbl fp number	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = points to double precision operand				#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, ZERO		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#									#
+#########################################################################
+
+	global		set_tag_d
+set_tag_d:
+	mov.l		FTEMP(%a0), %d0
+	mov.l		%d0, %d1
+
+	andi.l		&0x7ff00000, %d0
+	beq.b		zero_or_denorm_d
+
+	cmpi.l		%d0, &0x7ff00000
+	beq.b		inf_or_nan_d
+
+is_norm_d:
+	mov.b		&NORM, %d0
+	rts
+zero_or_denorm_d:
+	and.l		&0x000fffff, %d1
+	bne		is_denorm_d
+	tst.l		4+FTEMP(%a0)
+	bne		is_denorm_d
+is_zero_d:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_d:
+	mov.b		&DENORM, %d0
+	rts
+inf_or_nan_d:
+	and.l		&0x000fffff, %d1
+	bne		is_nan_d
+	tst.l		4+FTEMP(%a0)
+	bne		is_nan_d
+is_inf_d:
+	mov.b		&INF, %d0
+	rts
+is_nan_d:
+	btst		&19, %d1
+	bne		is_qnan_d
+is_snan_d:
+	mov.b		&SNAN, %d0
+	rts
+is_qnan_d:
+	mov.b		&QNAN, %d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	set_tag_s(): return the optype of the input sgl fp number	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to single precision operand			#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of type tag						#
+#		one of: NORM, INF, QNAN, SNAN, DENORM, ZERO		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Simply test the exponent, j-bit, and mantissa values to		#
+# determine the type of operand.					#
+#									#
+#########################################################################
+
+	global		set_tag_s
+set_tag_s:
+	mov.l		FTEMP(%a0), %d0
+	mov.l		%d0, %d1
+
+	andi.l		&0x7f800000, %d0
+	beq.b		zero_or_denorm_s
+
+	cmpi.l		%d0, &0x7f800000
+	beq.b		inf_or_nan_s
+
+is_norm_s:
+	mov.b		&NORM, %d0
+	rts
+zero_or_denorm_s:
+	and.l		&0x007fffff, %d1
+	bne		is_denorm_s
+is_zero_s:
+	mov.b		&ZERO, %d0
+	rts
+is_denorm_s:
+	mov.b		&DENORM, %d0
+	rts
+inf_or_nan_s:
+	and.l		&0x007fffff, %d1
+	bne		is_nan_s
+is_inf_s:
+	mov.b		&INF, %d0
+	rts
+is_nan_s:
+	btst		&22, %d1
+	bne		is_qnan_s
+is_snan_s:
+	mov.b		&SNAN, %d0
+	rts
+is_qnan_s:
+	mov.b		&QNAN, %d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	unf_res(): routine to produce default underflow result of a	#
+#		   scaled extended precision number; this is used by	#
+#		   fadd/fdiv/fmul/etc. emulation routines.		#
+#	unf_res4(): same as above but for fsglmul/fsgldiv which use	#
+#		    single round prec and extended prec mode.		#
+#									#
+# XREF ****************************************************************	#
+#	_denorm() - denormalize according to scale factor		#
+#	_round() - round denormalized number according to rnd prec	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision operand			#
+#	d0 = scale factor						#
+#	d1 = rounding precision/mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	a0 = pointer to default underflow result in extended precision	#
+#	d0.b = result FPSR_cc which caller may or may not want to save	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Convert the input operand to "internal format" which means the	#
+# exponent is extended to 16 bits and the sign is stored in the unused	#
+# portion of the extended precision operand. Denormalize the number	#
+# according to the scale factor passed in d0. Then, round the		#
+# denormalized result.							#
+#	Set the FPSR_exc bits as appropriate but return the cc bits in	#
+# d0 in case the caller doesn't want to save them (as is the case for	#
+# fmove out).								#
+#	unf_res4() for fsglmul/fsgldiv forces the denorm to extended	#
+# precision and the rounding mode to single.				#
+#									#
+#########################################################################
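+#
+# For illustration only (not part of the Motorola package): the flow of
+# unf_res(), sketched in hedged C on top of the denorm()/round_up()
+# sketches above ("threshold(prec)" stands in for tbl_thresh):
+#
+#	sign = ex >> 15;			/* stashed in FTEMP_SGN  */
+#	exp  = (int)(ex & 0x7fff) - scale;	/* 16-bit "internal" exp */
+#	grs  = denorm(&man, &exp, threshold(prec), 0);
+#	/* round per prec/mode, reattach the sign, and report a Z ccode
+#	   in d0 if the result rounded to zero */
+#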
+	global		unf_res
+unf_res:
+	mov.l		%d1, -(%sp)		# save rnd prec,mode on stack
+
+	btst		&0x7, FTEMP_EX(%a0)	# make "internal" format
+	sne		FTEMP_SGN(%a0)
+
+	mov.w		FTEMP_EX(%a0), %d1	# extract exponent
+	and.w		&0x7fff, %d1
+	sub.w		%d0, %d1
+	mov.w		%d1, FTEMP_EX(%a0)	# insert 16 bit exponent
+
+	mov.l		%a0, -(%sp)		# save operand ptr during calls
+
+	mov.l		0x4(%sp),%d0		# pass rnd prec.
+	andi.w		&0x00c0,%d0
+	lsr.w		&0x4,%d0
+	bsr.l		_denorm			# denorm result
+
+	mov.l		(%sp),%a0
+	mov.w		0x6(%sp),%d1		# load prec:mode into %d1
+	andi.w		&0xc0,%d1		# extract rnd prec
+	lsr.w		&0x4,%d1
+	swap		%d1
+	mov.w		0x6(%sp),%d1
+	andi.w		&0x30,%d1
+	lsr.w		&0x4,%d1
+	bsr.l		_round			# round the denorm
+
+	mov.l		(%sp)+, %a0
+
+# result is now rounded properly. convert back to normal format
+	bclr		&0x7, FTEMP_EX(%a0)	# clear sgn first; may have residue
+	tst.b		FTEMP_SGN(%a0)		# is "internal result" sign set?
+	beq.b		unf_res_chkifzero	# no; result is positive
+	bset		&0x7, FTEMP_EX(%a0)	# set result sgn
+	clr.b		FTEMP_SGN(%a0)		# clear temp sign
+
+# the number may have become zero after rounding. set ccodes accordingly.
+unf_res_chkifzero:
+	clr.l		%d0
+	tst.l		FTEMP_HI(%a0)		# is value now a zero?
+	bne.b		unf_res_cont		# no
+	tst.l		FTEMP_LO(%a0)
+	bne.b		unf_res_cont		# no
+#	bset		&z_bit, FPSR_CC(%a6)	# yes; set zero ccode bit
+	bset		&z_bit, %d0		# yes; set zero ccode bit
+
+unf_res_cont:
+
+#
+# can inex1 also be set along with unfl and inex2???
+#
+# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
+#
+	btst		&inex2_bit, FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.b		unf_res_end		# no
+	bset		&aunfl_bit, FPSR_AEXCEPT(%a6) # yes; set aunfl
+
+unf_res_end:
+	add.l		&0x4, %sp		# clear stack
+	rts
+
+# unf_res() for fsglmul() and fsgldiv().
+	global		unf_res4
+unf_res4:
+	mov.l		%d1,-(%sp)		# save rnd prec,mode on stack
+
+	btst		&0x7,FTEMP_EX(%a0)	# make "internal" format
+	sne		FTEMP_SGN(%a0)
+
+	mov.w		FTEMP_EX(%a0),%d1	# extract exponent
+	and.w		&0x7fff,%d1
+	sub.w		%d0,%d1
+	mov.w		%d1,FTEMP_EX(%a0)	# insert 16 bit exponent
+
+	mov.l		%a0,-(%sp)		# save operand ptr during calls
+
+	clr.l		%d0			# force rnd prec = ext
+	bsr.l		_denorm			# denorm result
+
+	mov.l		(%sp),%a0
+	mov.w		&s_mode,%d1		# force rnd prec = sgl
+	swap		%d1
+	mov.w		0x6(%sp),%d1		# load rnd mode
+	andi.w		&0x30,%d1		# extract rnd mode
+	lsr.w		&0x4,%d1
+	bsr.l		_round			# round the denorm
+
+	mov.l		(%sp)+,%a0
+
+# result is now rounded properly. convert back to normal format
+	bclr		&0x7,FTEMP_EX(%a0)	# clear sgn first; may have residue
+	tst.b		FTEMP_SGN(%a0)		# is "internal result" sign set?
+	beq.b		unf_res4_chkifzero	# no; result is positive
+	bset		&0x7,FTEMP_EX(%a0)	# set result sgn
+	clr.b		FTEMP_SGN(%a0)		# clear temp sign
+
+# the number may have become zero after rounding. set ccodes accordingly.
+unf_res4_chkifzero:
+	clr.l		%d0
+	tst.l		FTEMP_HI(%a0)		# is value now a zero?
+	bne.b		unf_res4_cont		# no
+	tst.l		FTEMP_LO(%a0)
+	bne.b		unf_res4_cont		# no
+#	bset		&z_bit,FPSR_CC(%a6)	# yes; set zero ccode bit
+	bset		&z_bit,%d0		# yes; set zero ccode bit
+
+unf_res4_cont:
+
+#
+# can inex1 also be set along with unfl and inex2???
+#
+# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
+#
+	btst		&inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.b		unf_res4_end		# no
+	bset		&aunfl_bit,FPSR_AEXCEPT(%a6) # yes; set aunfl
+
+unf_res4_end:
+	add.l		&0x4,%sp		# clear stack
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	ovf_res(): routine to produce the default overflow result of	#
+#		   an overflowing number.				#
+#	ovf_res2(): same as above but the rnd mode/prec are passed	#
+#		    differently.					#
+#									#
+# XREF ****************************************************************	#
+#	none								#
+#									#
+# INPUT ***************************************************************	#
+#	d1.b	= '-1' => (-); '0' => (+)				#
+#   ovf_res():								#
+#	d0	= rnd mode/prec						#
+#   ovf_res2():								#
+#	hi(d0)	= rnd prec						#
+#	lo(d0)	= rnd mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	a0	= points to extended precision result			#
+#	d0.b	= condition code bits					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The default overflow result can be determined by the sign of	#
+# the result and the rounding mode/prec in effect. These bits are	#
+# concatenated together to create an index into the default result	#
+# table. A pointer to the correct result is returned in a0. The		#
+# resulting condition codes are returned in d0 in case the caller	#
+# doesn't want FPSR_cc altered (as is the case for fmove out).		#
+#									#
+#########################################################################
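+#
+# For illustration only (not part of the Motorola package): the table
+# indexing below, sketched in hedged C (idx packs sign, prec, mode):
+#
+#	idx = sign << 4 | prec << 2 | mode;
+#	cc  = tbl_ovfl_cc[idx];			/* byte entries     */
+#	res = &tbl_ovfl_result[16 * idx];	/* 16 bytes apiece, */
+#						/* hence the *2, *8 */
+#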
+
+	global		ovf_res
+ovf_res:
+	andi.w		&0x10,%d1		# keep result sign
+	lsr.b		&0x4,%d0		# shift prec/mode
+	or.b		%d0,%d1			# concat the two
+	mov.w		%d1,%d0			# make a copy
+	lsl.b		&0x1,%d1		# multiply d1 by 2
+	bra.b		ovf_res_load
+
+	global		ovf_res2
+ovf_res2:
+	and.w		&0x10, %d1		# keep result sign
+	or.b		%d0, %d1		# insert rnd mode
+	swap		%d0
+	or.b		%d0, %d1		# insert rnd prec
+	mov.w		%d1, %d0		# make a copy
+	lsl.b		&0x1, %d1		# shift left by 1
+
+#
+# use the rounding mode, precision, and result sign as an index into the
+# two tables below to fetch the default result and the result ccodes.
+#
+ovf_res_load:
+	mov.b		(tbl_ovfl_cc.b,%pc,%d0.w*1), %d0 # fetch result ccodes
+	lea		(tbl_ovfl_result.b,%pc,%d1.w*8), %a0 # return result ptr
+
+	rts
+
+tbl_ovfl_cc:
+	byte		0x2, 0x0, 0x0, 0x2
+	byte		0x2, 0x0, 0x0, 0x2
+	byte		0x2, 0x0, 0x0, 0x2
+	byte		0x0, 0x0, 0x0, 0x0
+	byte		0x2+0x8, 0x8, 0x2+0x8, 0x8
+	byte		0x2+0x8, 0x8, 0x2+0x8, 0x8
+	byte		0x2+0x8, 0x8, 0x2+0x8, 0x8
+
+tbl_ovfl_result:
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+	long		0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RZ
+	long		0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RM
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+	long		0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RZ
+	long		0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RM
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+	long		0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RZ
+	long		0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RM
+	long		0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+	long		0x00000000,0x00000000,0x00000000,0x00000000
+
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+	long		0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RZ
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+	long		0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RP
+
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+	long		0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RZ
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+	long		0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RP
+
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+	long		0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RZ
+	long		0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+	long		0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RP
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fout(): move from fp register to memory or data register	#
+#									#
+# XREF ****************************************************************	#
+#	_round() - needed to create EXOP for sgl/dbl precision		#
+#	norm() - needed to create EXOP for extended precision		#
+#	ovf_res() - create default overflow result for sgl/dbl precision#
+#	unf_res() - create default underflow result for sgl/dbl prec.	#
+#	dst_dbl() - create rounded dbl precision result.		#
+#	dst_sgl() - create rounded sgl precision result.		#
+#	fetch_dreg() - fetch dynamic k-factor reg for packed.		#
+#	bindec() - convert FP binary number to packed number.		#
+#	_mem_write() - write data to memory.				#
+#	_mem_write2() - write data to memory unless supv mode -(a7) exc.#
+#	_dmem_write_{byte,word,long}() - write data to memory.		#
+#	store_dreg_{b,w,l}() - store data to data register file.	#
+#	facc_out_{b,w,l,d,x}() - data access error occurred.		#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 : intermediate underflow or overflow result if		#
+#	      OVFL/UNFL occurred for a sgl or dbl operand		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	This routine is accessed by many handlers that need to do an	#
+# opclass three move of an operand out to memory.			#
+#	Decode an fmove out (opclass 3) instruction to determine if	#
+# it's b,w,l,s,d,x, or p in size. b,w,l can be stored to either a data	#
+# register or memory. The algorithm uses a standard "fmove" to create	#
+# the rounded result. Also, since exceptions are disabled, this also	#
+# creates the correct OPERR default result if appropriate.		#
+#	For sgl or dbl precision, overflow or underflow can occur. If	#
+# either occurs and is enabled, the EXOP is created.			#
+#	For extended precision, the stacked <ea> must be fixed along	#
+# w/ the address index register as appropriate w/ _calc_ea_fout(). If	#
+# the source is a denorm and if underflow is enabled, an EXOP must be	#
+# created.								#
+#	For packed, the k-factor must be fetched from the instruction	#
+# word or a data register. The <ea> must be fixed as w/ extended	#
+# precision. Then, bindec() is called to create the appropriate		#
+# packed result.							#
+#	If at any time an access error is flagged by one of the move-	#
+# to-memory routines, then a special exit must be made so that the	#
+# access error can be handled properly.					#
+#									#
+#########################################################################
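+#
+# For illustration only (not part of the Motorola package): the jump
+# table below, sketched as a hedged C dispatch on the 3-bit destination
+# format extracted from EXC_CMDREG:
+#
+#	static void (* const handler[8])(void) = {
+#		fout_long, fout_sgl, fout_ext,  fout_pack,
+#		fout_word, fout_dbl, fout_byte, fout_pack,
+#	};
+#	handler[dst_fmt]();
+#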
+
+	global		fout
+fout:
+	bfextu		EXC_CMDREG(%a6){&3:&3},%d1 # extract dst fmt
+	mov.w		(tbl_fout.b,%pc,%d1.w*2),%a1 # use as index
+	jmp		(tbl_fout.b,%pc,%a1)	# jump to routine
+
+	swbeg		&0x8
+tbl_fout:
+	short		fout_long	-	tbl_fout
+	short		fout_sgl	-	tbl_fout
+	short		fout_ext	-	tbl_fout
+	short		fout_pack	-	tbl_fout
+	short		fout_word	-	tbl_fout
+	short		fout_dbl	-	tbl_fout
+	short		fout_byte	-	tbl_fout
+	short		fout_pack	-	tbl_fout
+
+#################################################################
+# fmove.b out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_byte:
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_byte_denorm	# no
+
+	fmovm.x		SRC(%a0),&0x80		# load value
+
+fout_byte_norm:
+	fmov.l		%d0,%fpcr		# insert rnd prec,mode
+
+	fmov.b		%fp0,%d0		# exec move out w/ correct rnd mode
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch FPSR
+	or.w		%d1,2+USER_FPSR(%a6)	# save new exc,accrued bits
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_byte_dn		# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_byte	# write byte
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_b		# yes
+
+	rts
+
+fout_byte_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_b
+	rts
+
+fout_byte_denorm:
+	mov.l		SRC_EX(%a0),%d1
+	andi.l		&0x80000000,%d1		# keep DENORM sign
+	ori.l		&0x00800000,%d1		# make smallest sgl
+	fmov.s		%d1,%fp0
+	bra.b		fout_byte_norm
+
+#################################################################
+# fmove.w out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_word:
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_word_denorm	# no
+
+	fmovm.x		SRC(%a0),&0x80		# load value
+
+fout_word_norm:
+	fmov.l		%d0,%fpcr		# insert rnd prec:mode
+
+	fmov.w		%fp0,%d0		# exec move out w/ correct rnd mode
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch FPSR
+	or.w		%d1,2+USER_FPSR(%a6)	# save new exc,accrued bits
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_word_dn		# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_word	# write word
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_w		# yes
+
+	rts
+
+fout_word_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_w
+	rts
+
+fout_word_denorm:
+	mov.l		SRC_EX(%a0),%d1
+	andi.l		&0x80000000,%d1		# keep DENORM sign
+	ori.l		&0x00800000,%d1		# make smallest sgl
+	fmov.s		%d1,%fp0
+	bra.b		fout_word_norm
+
+#################################################################
+# fmove.l out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_long:
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_long_denorm	# no
+
+	fmovm.x		SRC(%a0),&0x80		# load value
+
+fout_long_norm:
+	fmov.l		%d0,%fpcr		# insert rnd prec:mode
+
+	fmov.l		%fp0,%d0		# exec move out w/ correct rnd mode
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch FPSR
+	or.w		%d1,2+USER_FPSR(%a6)	# save new exc,accrued bits
+
+fout_long_write:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_long_dn		# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	rts
+
+fout_long_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+	rts
+
+fout_long_denorm:
+	mov.l		SRC_EX(%a0),%d1
+	andi.l		&0x80000000,%d1		# keep DENORM sign
+	ori.l		&0x00800000,%d1		# make smallest sgl
+	fmov.s		%d1,%fp0
+	bra.b		fout_long_norm
+
+#################################################################
+# fmove.x out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+# The DENORM causes an Underflow exception.
+fout_ext:
+
+# we copy the extended precision result to FP_SCR0 so that the reserved
+# 16-bit field gets zeroed. we do this since we promise not to disturb
+# what's at SRC(a0).
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	clr.w		2+FP_SCR0_EX(%a6)	# clear reserved field
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	fmovm.x		SRC(%a0),&0x80		# return result
+
+	bsr.l		_calc_ea_fout		# fix stacked <ea>
+
+	mov.l		%a0,%a1			# pass: dst addr
+	lea		FP_SCR0(%a6),%a0	# pass: src addr
+	mov.l		&0xc,%d0		# pass: opsize is 12 bytes
+
+# we must not yet write the extended precision data to the stack
+# in the pre-decrement case from supervisor mode or else we'll corrupt
+# the stack frame. so, leave it in FP_SRC for now and deal with it later...
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	beq.b		fout_ext_a7
+
+	bsr.l		_dmem_write		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_ext_denorm		# no
+	rts
+
+# the number is a DENORM. must set the underflow exception bit
+fout_ext_denorm:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set underflow exc bit
+
+	mov.b		FPCR_ENABLE(%a6),%d0
+	andi.b		&0x0a,%d0		# is UNFL or INEX enabled?
+	bne.b		fout_ext_exc		# yes
+	rts
+
+# we don't want to do the write if the exception occurred in supervisor mode
+# so _mem_write2() handles this for us.
+fout_ext_a7:
+	bsr.l		_mem_write2		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	tst.b		STAG(%a6)		# is operand normalized?
+	bne.b		fout_ext_denorm		# no
+	rts
+
+fout_ext_exc:
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the mantissa
+	neg.w		%d0			# new exp = -(shft amt)
+	andi.w		&0x7fff,%d0
+	andi.w		&0x8000,FP_SCR0_EX(%a6)	# keep only old sign
+	or.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+fout_ext_err:
+	mov.l		EXC_A6(%a6),(%a6)	# fix stacked a6
+	bra.l		facc_out_x
+
+#########################################################################
+# fmove.s out ###########################################################
+#########################################################################
+fout_sgl:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	mov.l		%d0,L_SCR3(%a6)		# save rnd prec,mode on stack
+
+#
+# operand is a normalized number. first, we check to see if the move out
+# would cause either an underflow or overflow. these cases are handled
+# separately. otherwise, set the FPCR to the proper rounding mode and
+# execute the move.
+#
+	mov.w		SRC_EX(%a0),%d0		# extract exponent
+	andi.w		&0x7fff,%d0		# strip sign
+
+	cmpi.w		%d0,&SGL_HI		# will operand overflow?
+	bgt.w		fout_sgl_ovfl		# yes; go handle OVFL
+	beq.w		fout_sgl_may_ovfl	# maybe; go handle possible OVFL
+	cmpi.w		%d0,&SGL_LO		# will operand underflow?
+	blt.w		fout_sgl_unfl		# yes; go handle underflow
+
+#
+# NORMs(in range) can be stored out by a simple "fmov.s"
+# Unnormalized inputs can come through this point.
+#
+fout_sgl_exg:
+	fmovm.x		SRC(%a0),&0x80		# fetch fop from stack
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmov.s		%fp0,%d0		# store does convert and round
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.w		%d1,2+USER_FPSR(%a6)	# set possible inex2/ainex
+
+fout_sgl_exg_write:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_sgl_exg_write_dn	# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	rts
+
+fout_sgl_exg_write_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+	rts
+
+#
+# here, we know that the operand would UNFL if moved out to single prec,
+# so, denorm and round and then use generic store single routine to
+# write the value to memory.
+#
+fout_sgl_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.l		%a0,-(%sp)
+
+	clr.l		%d0			# pass: S.F. = 0
+
+	cmpi.b		STAG(%a6),&DENORM	# fetch src optype tag
+	bne.b		fout_sgl_unfl_cont	# let DENORMs fall through
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the DENORM
+
+fout_sgl_unfl_cont:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calc default underflow result
+
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to fop
+	bsr.l		dst_sgl			# convert to single prec
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_sgl_unfl_dn	# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.b		fout_sgl_unfl_chkexc
+
+fout_sgl_unfl_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+
+fout_sgl_unfl_chkexc:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_unfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# it's definitely an overflow so call ovf_res to get the correct answer
+#
+fout_sgl_ovfl:
+	tst.b		3+SRC_HI(%a0)		# is result inexact?
+	bne.b		fout_sgl_ovfl_inex2
+	tst.l		SRC_LO(%a0)		# is result inexact?
+	bne.b		fout_sgl_ovfl_inex2
+	ori.w		&ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+	bra.b		fout_sgl_ovfl_cont
+fout_sgl_ovfl_inex2:
+	ori.w		&ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
+
+fout_sgl_ovfl_cont:
+	mov.l		%a0,-(%sp)
+
+# call ovf_res() w/ sgl prec and the correct rnd mode to create the default
+# overflow result. DON'T save the returned ccodes from ovf_res() since
+# fmove out doesn't alter them.
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	smi		%d1			# set if so
+	mov.l		L_SCR3(%a6),%d0		# pass: sgl prec,rnd mode
+	bsr.l		ovf_res			# calc OVFL result
+	fmovm.x		(%a0),&0x80		# load default overflow result
+	fmov.s		%fp0,%d0		# store to single
+
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract dst mode
+	andi.b		&0x38,%d1		# is mode == 0? (Dreg dst)
+	beq.b		fout_sgl_ovfl_dn	# must save to integer regfile
+
+	mov.l		EXC_EA(%a6),%a0		# stacked <ea> is correct
+	bsr.l		_dmem_write_long	# write long
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_l		# yes
+
+	bra.b		fout_sgl_ovfl_chkexc
+
+fout_sgl_ovfl_dn:
+	mov.b		1+EXC_OPWORD(%a6),%d1	# extract Dn
+	andi.w		&0x7,%d1
+	bsr.l		store_dreg_l
+
+fout_sgl_ovfl_chkexc:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_ovfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# move out MAY overflow:
+# (1) force the biased exp to 0x3fff (unbiased exp = 0)
+# (2) do a move w/ the appropriate rnd mode
+# (3) if the unbiased exp still equals zero, then insert the original
+#	exponent for the correct result.
+#     if it now equals one, then the move overflowed, so call ovf_res.
+#
+fout_sgl_may_ovfl:
+	mov.w		SRC_EX(%a0),%d1		# fetch current sign
+	andi.w		&0x8000,%d1		# keep it,clear exp
+	ori.w		&0x3fff,%d1		# insert exp = 0
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert scaled exp
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# force fop to be rounded
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# need absolute value
+	fcmp.b		%fp0,&0x2		# did exponent increase?
+	fblt.w		fout_sgl_exg		# no; go finish NORM
+	bra.w		fout_sgl_ovfl		# yes; go handle overflow
+
+################
+
+fout_sd_exc_unfl:
+	mov.l		(%sp)+,%a0
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	cmpi.b		STAG(%a6),&DENORM	# was src a DENORM?
+	bne.b		fout_sd_exc_cont	# no
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm
+	neg.l		%d0
+	andi.w		&0x7fff,%d0
+	bfins		%d0,FP_SCR0_EX(%a6){&1:&15}
+	bra.b		fout_sd_exc_cont
+
+fout_sd_exc:
+fout_sd_exc_ovfl:
+	mov.l		(%sp)+,%a0		# restore a0
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+fout_sd_exc_cont:
+	bclr		&0x7,FP_SCR0_EX(%a6)	# clear sign bit
+	sne.b		2+FP_SCR0_EX(%a6)	# set internal sign bit
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to DENORM
+
+	mov.b		3+L_SCR3(%a6),%d1
+	lsr.b		&0x4,%d1
+	andi.w		&0x0c,%d1
+	swap		%d1
+	mov.b		3+L_SCR3(%a6),%d1
+	lsr.b		&0x4,%d1
+	andi.w		&0x03,%d1
+	clr.l		%d0			# pass: zero g,r,s
+	bsr.l		_round			# round the DENORM
+
+	tst.b		2+FP_SCR0_EX(%a6)	# is EXOP negative?
+	beq.b		fout_sd_exc_done	# no
+	bset		&0x7,FP_SCR0_EX(%a6)	# yes
+
+fout_sd_exc_done:
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#################################################################
+# fmove.d out ###################################################
+#################################################################
+fout_dbl:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+	mov.l		%d0,L_SCR3(%a6)		# save rnd prec,mode on stack
+
+#
+# operand is a normalized number. first, we check to see if the move out
+# would cause either an underflow or overflow. these cases are handled
+# separately. otherwise, set the FPCR to the proper rounding mode and
+# execute the move.
+#
+	mov.w		SRC_EX(%a0),%d0		# extract exponent
+	andi.w		&0x7fff,%d0		# strip sign
+
+	cmpi.w		%d0,&DBL_HI		# will operand overflow?
+	bgt.w		fout_dbl_ovfl		# yes; go handle OVFL
+	beq.w		fout_dbl_may_ovfl	# maybe; go handle possible OVFL
+	cmpi.w		%d0,&DBL_LO		# will operand underflow?
+	blt.w		fout_dbl_unfl		# yes; go handle underflow
+
+#
+# NORMs(in range) can be stored out by a simple "fmov.d"
+# Unnormalized inputs can come through this point.
+#
+fout_dbl_exg:
+	fmovm.x		SRC(%a0),&0x80		# fetch fop from stack
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmov.d		%fp0,L_SCR1(%a6)	# store does convert and round
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d0		# save FPSR
+
+	or.w		%d0,2+USER_FPSR(%a6)	# set possible inex2/ainex
+
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	lea		L_SCR1(%a6),%a0		# pass: src addr
+	movq.l		&0x8,%d0		# pass: opsize is 8 bytes
+	bsr.l		_dmem_write		# store dbl fop to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	rts					# no; so we're finished
+
+#
+# here, we know that the operand would UNFL if moved out to double prec,
+# so, denorm and round and then use generic store double routine to
+# write the value to memory.
+#
+fout_dbl_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.l		%a0,-(%sp)
+
+	clr.l		%d0			# pass: S.F. = 0
+
+	cmpi.b		STAG(%a6),&DENORM	# fetch src optype tag
+	bne.b		fout_dbl_unfl_cont	# let DENORMs fall through
+
+	lea		FP_SCR0(%a6),%a0
+	bsr.l		norm			# normalize the DENORM
+
+fout_dbl_unfl_cont:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calc default underflow result
+
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to fop
+	bsr.l		dst_dbl			# convert to double prec
+	mov.l		%d0,L_SCR1(%a6)
+	mov.l		%d1,L_SCR2(%a6)
+
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	lea		L_SCR1(%a6),%a0		# pass: src addr
+	movq.l		&0x8,%d0		# pass: opsize is 8 bytes
+	bsr.l		_dmem_write		# store dbl fop to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_unfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# it's definitely an overflow so call ovf_res to get the correct answer
+#
+fout_dbl_ovfl:
+	mov.w		2+SRC_LO(%a0),%d0
+	andi.w		&0x7ff,%d0
+	bne.b		fout_dbl_ovfl_inex2
+
+	ori.w		&ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+	bra.b		fout_dbl_ovfl_cont
+fout_dbl_ovfl_inex2:
+	ori.w		&ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
+
+fout_dbl_ovfl_cont:
+	mov.l		%a0,-(%sp)
+
+# call ovf_res() w/ dbl prec and the correct rnd mode to create the default
+# overflow result. DON'T save the returned ccodes from ovf_res() since
+# fmove out doesn't alter them.
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	smi		%d1			# set if so
+	mov.l		L_SCR3(%a6),%d0		# pass: dbl prec,rnd mode
+	bsr.l		ovf_res			# calc OVFL result
+	fmovm.x		(%a0),&0x80		# load default overflow result
+	fmov.d		%fp0,L_SCR1(%a6)	# store to double
+
+	mov.l		EXC_EA(%a6),%a1		# pass: dst addr
+	lea		L_SCR1(%a6),%a0		# pass: src addr
+	movq.l		&0x8,%d0		# pass: opsize is 8 bytes
+	bsr.l		_dmem_write		# store dbl fop to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.l		facc_out_d		# yes
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0a,%d1		# is UNFL or INEX enabled?
+	bne.w		fout_sd_exc_ovfl	# yes
+	addq.l		&0x4,%sp
+	rts
+
+#
+# move out MAY overflow:
+# (1) force the biased exp to 0x3fff (unbiased exp = 0)
+# (2) do a move w/ the appropriate rnd mode
+# (3) if the unbiased exp still equals zero, then insert the original
+#	exponent for the correct result.
+#     if it now equals one, then the move overflowed, so call ovf_res.
+#
+fout_dbl_may_ovfl:
+	mov.w		SRC_EX(%a0),%d1		# fetch current sign
+	andi.w		&0x8000,%d1		# keep it,clear exp
+	ori.w		&0x3fff,%d1		# insert exp = 0
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert scaled exp
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# force fop to be rounded
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# need absolute value
+	fcmp.b		%fp0,&0x2		# did exponent increase?
+	fblt.w		fout_dbl_exg		# no; go finish NORM
+	bra.w		fout_dbl_ovfl		# yes; go handle overflow
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	dst_dbl(): create double precision value from extended prec.	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to source operand in extended precision		#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = hi(double precision result)				#
+#	d1 = lo(double precision result)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#  Changes extended precision to double precision.			#
+#  Note: no attempt is made to round the extended value to double.	#
+#	dbl_sign = ext_sign						#
+#	dbl_exp = ext_exp - $3fff(ext bias) + $3ff(dbl bias)		#
+#	get rid of ext integer bit					#
+#	dbl_mant = ext_mant{62:11}					#
+#									#
+#		---------------   ---------------    ---------------	#
+#  extended ->  |s|    exp    |   |1| ms mant   |    | ls mant     |	#
+#		---------------   ---------------    ---------------	#
+#		 95	    64    63 62	      32      31     11	  0	#
+#				     |			     |		#
+#				     |			     |		#
+#				     |			     |		#
+#			             v			     v		#
+#			      ---------------   ---------------		#
+#  double   ->		      |s|exp| mant  |   |  mant       |		#
+#			      ---------------   ---------------		#
+#			      63     51   32   31	       0	#
+#									#
+#########################################################################
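+# a self-contained C sketch of the same repacking for the normalized,
+# in-range case (illustrative only; not part of the original Motorola
+# sources; the denorm bias tweak below is omitted):
+#
+#	#include <stdint.h>
+#	/* sign_exp: ext {sign,15-bit exp}; man: 64-bit ext mantissa with
+#	   the explicit integer bit at bit 63. truncates, exactly like
+#	   the routine below -- no rounding is performed. */
+#	uint64_t ext_to_dbl(uint16_t sign_exp, uint64_t man)
+#	{
+#		uint64_t s = (uint64_t)(sign_exp & 0x8000) << 48;
+#		uint64_t e = (uint64_t)((sign_exp & 0x7fff)
+#					- 0x3fff + 0x3ff) << 52;
+#		uint64_t m = (man >> 11) & 0x000fffffffffffffULL;
+#		return s | e | m;	/* ext integer bit dropped */
+#	}
+#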
+
+dst_dbl:
+	clr.l		%d0			# clear d0
+	mov.w		FTEMP_EX(%a0),%d0	# get exponent
+	subi.w		&EXT_BIAS,%d0		# subtract extended precision bias
+	addi.w		&DBL_BIAS,%d0		# add double precision bias
+	tst.b		FTEMP_HI(%a0)		# is number a denorm?
+	bmi.b		dst_get_dupper		# no
+	subq.w		&0x1,%d0		# yes; denorm bias = DBL_BIAS - 1
+dst_get_dupper:
+	swap		%d0			# d0 now in upper word
+	lsl.l		&0x4,%d0		# d0 in proper place for dbl prec exp
+	tst.b		FTEMP_EX(%a0)		# test sign
+	bpl.b		dst_get_dman		# if positive, go process mantissa
+	bset		&0x1f,%d0		# if negative, set sign
+dst_get_dman:
+	mov.l		FTEMP_HI(%a0),%d1	# get ms mantissa
+	bfextu		%d1{&1:&20},%d1		# get upper 20 bits of ms
+	or.l		%d1,%d0			# put these bits in ms word of double
+	mov.l		%d0,L_SCR1(%a6)		# put the new exp back on the stack
+	mov.l		FTEMP_HI(%a0),%d1	# get ms mantissa
+	mov.l		&21,%d0			# load shift count
+	lsl.l		%d0,%d1			# put lower 11 bits in upper bits
+	mov.l		%d1,L_SCR2(%a6)		# build lower lword in memory
+	mov.l		FTEMP_LO(%a0),%d1	# get ls mantissa
+	bfextu		%d1{&0:&21},%d0		# get ls 21 bits of double
+	mov.l		L_SCR2(%a6),%d1
+	or.l		%d0,%d1			# put them in double result
+	mov.l		L_SCR1(%a6),%d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	dst_sgl(): create single precision value from extended prec	#
+#									#
+# XREF ****************************************************************	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to source operand in extended precision		#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = single precision result					#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+# Changes extended precision to single precision.			#
+#	sgl_sign = ext_sign						#
+#	sgl_exp = ext_exp - $3fff(ext bias) + $7f(sgl bias)		#
+#	get rid of ext integer bit					#
+#	sgl_mant = ext_mant{62:40}					#
+#									#
+#		---------------   ---------------    ---------------	#
+#  extended ->  |s|    exp    |   |1| ms mant   |    | ls mant     |	#
+#		---------------   ---------------    ---------------	#
+#		 95	    64    63 62	   40 32      31     12	  0	#
+#				     |	   |				#
+#				     |	   |				#
+#				     |	   |				#
+#			             v     v				#
+#			      ---------------				#
+#  single   ->		      |s|exp| mant  |				#
+#			      ---------------				#
+#			      31     22     0				#
+#									#
+#########################################################################
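+# the matching C sketch for single precision (illustrative only; again
+# assumes a normalized, in-range input and omits the denorm bias tweak):
+#
+#	uint32_t ext_to_sgl(uint16_t sign_exp, uint32_t man_hi)
+#	{
+#		uint32_t s = (uint32_t)(sign_exp & 0x8000) << 16;
+#		uint32_t e = ((uint32_t)(sign_exp & 0x7fff)
+#					- 0x3fff + 0x7f) << 23;
+#		uint32_t m = (man_hi >> 8) & 0x007fffff;
+#		return s | e | m;	/* ext integer bit dropped */
+#	}
+#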
+
+dst_sgl:
+	clr.l		%d0
+	mov.w		FTEMP_EX(%a0),%d0	# get exponent
+	subi.w		&EXT_BIAS,%d0		# subtract extended precision bias
+	addi.w		&SGL_BIAS,%d0		# add single precision bias
+	tst.b		FTEMP_HI(%a0)		# is number a denorm?
+	bmi.b		dst_get_supper		# no
+	subq.w		&0x1,%d0		# yes; denorm bias = SGL_BIAS - 1
+dst_get_supper:
+	swap		%d0			# put exp in upper word of d0
+	lsl.l		&0x7,%d0		# shift it into single exp bits
+	tst.b		FTEMP_EX(%a0)		# test sign
+	bpl.b		dst_get_sman		# if positive, continue
+	bset		&0x1f,%d0		# if negative, put in sign first
+dst_get_sman:
+	mov.l		FTEMP_HI(%a0),%d1	# get ms mantissa
+	andi.l		&0x7fffff00,%d1		# get upper 23 bits of ms
+	lsr.l		&0x8,%d1		# and put them flush right
+	or.l		%d1,%d0			# put these bits in ms word of single
+	rts
+
+##############################################################################
+fout_pack:
+	bsr.l		_calc_ea_fout		# fetch the <ea>
+	mov.l		%a0,-(%sp)
+
+	mov.b		STAG(%a6),%d0		# fetch input type
+	bne.w		fout_pack_not_norm	# input is not NORM
+
+fout_pack_norm:
+	btst		&0x4,EXC_CMDREG(%a6)	# static or dynamic?
+	beq.b		fout_pack_s		# static
+
+fout_pack_d:
+	mov.b		1+EXC_CMDREG(%a6),%d1	# fetch dynamic reg
+	lsr.b		&0x4,%d1
+	andi.w		&0x7,%d1
+
+	bsr.l		fetch_dreg		# fetch Dn w/ k-factor
+
+	bra.b		fout_pack_type
+fout_pack_s:
+	mov.b		1+EXC_CMDREG(%a6),%d0	# fetch static field
+
+fout_pack_type:
+	bfexts		%d0{&25:&7},%d0		# extract k-factor
+	mov.l	%d0,-(%sp)
+
+	lea		FP_SRC(%a6),%a0		# pass: ptr to input
+
+# bindec is currently scrambling FP_SRC for denorm inputs.
+# we'll have to change this, but for now, tough luck!!!
+	bsr.l		bindec			# convert xprec to packed
+
+#	andi.l		&0xcfff000f,FP_SCR0(%a6) # clear unused fields
+	andi.l		&0xcffff00f,FP_SCR0(%a6) # clear unused fields
+
+	mov.l	(%sp)+,%d0
+
+	tst.b		3+FP_SCR0_EX(%a6)
+	bne.b		fout_pack_set
+	tst.l		FP_SCR0_HI(%a6)
+	bne.b		fout_pack_set
+	tst.l		FP_SCR0_LO(%a6)
+	bne.b		fout_pack_set
+
+# add the extra condition that only if the k-factor was zero, too, should
+# we zero the exponent
+	tst.l		%d0
+	bne.b		fout_pack_set
+# "mantissa" is all zero which means that the answer is zero. but, the '040
+# algorithm allows the exponent to be non-zero. the 881/2 do not. therefore,
+# if the mantissa is zero, I will zero the exponent, too.
+# the question now is whether the exponent's sign bit is allowed to be non-zero
+# for a zero, also...
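+# (the andi.w below keeps only the sign/sign-of-exponent nibble of the
+# packed exponent word, clearing the three exponent digits to zero)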
+	andi.w		&0xf000,FP_SCR0(%a6)
+
+fout_pack_set:
+
+	lea		FP_SCR0(%a6),%a0	# pass: src addr
+
+fout_pack_write:
+	mov.l		(%sp)+,%a1		# pass: dst addr
+	mov.l		&0xc,%d0		# pass: opsize is 12 bytes
+
+	cmpi.b		SPCOND_FLG(%a6),&mda7_flg
+	beq.b		fout_pack_a7
+
+	bsr.l		_dmem_write		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	rts
+
+# we don't want to do the write if the exception occurred in supervisor mode
+# so _mem_write2() handles this for us.
+fout_pack_a7:
+	bsr.l		_mem_write2		# write ext prec number to memory
+
+	tst.l		%d1			# did dstore fail?
+	bne.w		fout_ext_err		# yes
+
+	rts
+
+fout_pack_not_norm:
+	cmpi.b		%d0,&DENORM		# is it a DENORM?
+	beq.w		fout_pack_norm		# yes
+	lea		FP_SRC(%a6),%a0
+	clr.w		2+FP_SRC_EX(%a6)
+	cmpi.b		%d0,&SNAN		# is it an SNAN?
+	beq.b		fout_pack_snan		# yes
+	bra.b		fout_pack_write		# no
+
+fout_pack_snan:
+	ori.w		&snaniop2_mask,FPSR_EXCEPT(%a6) # set SNAN/AIOP
+	bset		&0x6,FP_SRC_HI(%a6)	# set snan bit
+	bra.b		fout_pack_write
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fmul(): emulates the fmul instruction				#
+#	fsmul(): emulates the fsmul instruction				#
+#	fdmul(): emulates the fdmul instruction				#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res() - return default underflow result			#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a multiply	#
+# instruction won't cause an exception. Use the regular fmul to		#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
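+# the scaling idea as a hedged C sketch (illustrative only; names are
+# hypothetical). both operands get their exponents scaled to zero so the
+# hardware multiply cannot trap, and the summed scale factor is compared
+# against the per-precision thresholds in the tables below:
+#
+#	int scale = scale_src + scale_dst;	/* from scale_to_zero_*() */
+#	if      (scale <  tbl_fmul_ovfl[prec])	/* will overflow  */ ;
+#	else if (scale == tbl_fmul_ovfl[prec])	/* may overflow   */ ;
+#	else if (scale >  tbl_fmul_unfl[prec])	/* will underflow */ ;
+#	else if (scale == tbl_fmul_unfl[prec])	/* may underflow  */ ;
+#	else	/* normal: multiply, then result_exp -= scale */ ;
+#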
+
+	align		0x10
+tbl_fmul_ovfl:
+	long		0x3fff - 0x7ffe		# ext_max
+	long		0x3fff - 0x407e		# sgl_max
+	long		0x3fff - 0x43fe		# dbl_max
+tbl_fmul_unfl:
+	long		0x3fff + 0x0001		# ext_unfl
+	long		0x3fff - 0x3f80		# sgl_unfl
+	long		0x3fff - 0x3c00		# dbl_unfl
+
+	global		fsmul
+fsmul:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fmul
+
+	global		fdmul
+fdmul:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fmul
+fmul:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+	bne.w		fmul_not_norm		# optimize on non-norm input
+
+fmul_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# scale src exponent
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# scale dst exponent
+
+	add.l		%d0,(%sp)		# SCALE_FACTOR = scale1 + scale2
+
+	mov.w		2+L_SCR3(%a6),%d1	# fetch precision
+	lsr.b		&0x6,%d1		# shift to lo bits
+	mov.l		(%sp)+,%d0		# load S.F.
+	cmp.l		%d0,(tbl_fmul_ovfl.w,%pc,%d1.w*4) # would result ovfl?
+	beq.w		fmul_may_ovfl		# result may rnd to overflow
+	blt.w		fmul_ovfl		# result will overflow
+
+	cmp.l		%d0,(tbl_fmul_unfl.w,%pc,%d1.w*4) # would result unfl?
+	beq.w		fmul_may_unfl		# result may rnd to no unfl
+	bgt.w		fmul_unfl		# result will underflow
+
+#
+# NORMAL:
+# - the result of the multiply operation will neither overflow nor underflow.
+# - do the multiply to the proper precision and rounding mode.
+# - scale the result exponent using the scale factor. if both operands were
+# normalized then we really don't need to go through this scaling. but for now,
+# this will do.
+#
+fmul_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fmul_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# OVERFLOW:
+# - the result of the multiply operation is an overflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+# - if overflow or inexact is enabled, we need a multiply result rounded to
+# extended precision. if the original operation was extended, then we have this
+# result. if the original operation was single or double, we have to do another
+# multiply using extended precision and the correct rounding mode. the result
+# of this operation then has its exponent scaled by -0x6000 to create the
+# exceptional operand.
+#
+fmul_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+# save setting this until now because this is where fmul_may_ovfl may jump in
+fmul_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fmul_ovfl_ena		# yes
+
+# calculate the default result
+fmul_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass rnd prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled; Create EXOP:
+# - if precision is extended, then we have the EXOP. simply bias the exponent
+# with an extra -0x6000. if the precision is single or double, we need to
+# calculate a result rounded to extended precision.
+#
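+# in effect (illustrative arithmetic): EXOP_exp = (true_exp - 0x6000)
+# & 0x7fff, where true_exp = rounded_exp - scale; the 0x6000 bias-down
+# keeps the exceptional operand representable in extended precision.
+#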
+fmul_ovfl_ena:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# test the rnd prec
+	bne.b		fmul_ovfl_ena_sd	# it's sgl or dbl
+
+fmul_ovfl_ena_cont:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1		# clear sign bit
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fmul_ovfl_dis
+
+fmul_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# keep rnd mode only
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	bra.b		fmul_ovfl_ena_cont
+
+#
+# may OVERFLOW:
+# - the result of the multiply operation MAY overflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+#
+fmul_may_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fmul_ovfl_tst		# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fmul_normal_exit
+
+#
+# UNDERFLOW:
+# - the result of the multiply operation is an underflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+# - if overflow or inexact is enabled, we need a multiply result rounded to
+# extended precision. if the original operation was extended, then we have this
+# result. if the original operation was single or double, we have to do another
+# multiply using extended precision and the correct rounding mode. the result
+# of this operation then has its exponent scaled by -0x6000 to create the
+# exceptional operand.
+#
+fmul_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+# for fun, let's use only extended precision, round to zero. then, let
+# the unf_res() routine figure out all the rest; we will still get the
+# correct answer.
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fmul_unfl_ena		# yes
+
+fmul_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# unf_res may have set 'Z'
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fmul_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fmul_unfl_ena_sd	# no, sgl or dbl
+
+# if the rnd mode is anything but RZ, then we have to re-do the above
+# multiplication because we used RZ for all.
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fmul_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp1	# execute multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fmul_unfl_dis
+
+fmul_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# use only rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fmul_unfl_ena_cont
+
+# MAY UNDERFLOW:
+# -use the correct rounding mode and precision. this code favors operations
+# that do not underflow.
+fmul_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp0	# execute multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| > 2.b?
+	fbgt.w		fmul_normal_exit	# no; no underflow occurred
+	fblt.w		fmul_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 2. but,
+# we don't know if the result was an underflow that rounded up to a 2 or
+# a normalized number that rounded down to a 2. so, redo the entire operation
+# using RZ as the rounding mode to see what the pre-rounded result is.
+# this case should be relatively rare.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst operand
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert RZ
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fmul.x		FP_SCR0(%a6),%fp1	# execute multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x2		# is |result| < 2.b?
+	fbge.w		fmul_normal_exit	# no; no underflow occurred
+	bra.w		fmul_unfl		# yes, underflow occurred
+
+################################################################################
+
+#
+# Multiply: inputs are not both normalized; what are they?
+#
+fmul_not_norm:
+	mov.w		(tbl_fmul_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fmul_op.b,%pc,%d1.w)
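+# (illustrative) the index in %d1 is (DTAG << 3) | STAG, so each
+# destination type owns one eight-entry row below; the table holds
+# 16-bit offsets from its own base, which the pc-relative jmp adds back.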
+
+	swbeg		&48
+tbl_fmul_op:
+	short		fmul_norm	- tbl_fmul_op # NORM x NORM
+	short		fmul_zero	- tbl_fmul_op # NORM x ZERO
+	short		fmul_inf_src	- tbl_fmul_op # NORM x INF
+	short		fmul_res_qnan	- tbl_fmul_op # NORM x QNAN
+	short		fmul_norm	- tbl_fmul_op # NORM x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # NORM x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_zero	- tbl_fmul_op # ZERO x NORM
+	short		fmul_zero	- tbl_fmul_op # ZERO x ZERO
+	short		fmul_res_operr	- tbl_fmul_op # ZERO x INF
+	short		fmul_res_qnan	- tbl_fmul_op # ZERO x QNAN
+	short		fmul_zero	- tbl_fmul_op # ZERO x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # ZERO x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_inf_dst	- tbl_fmul_op # INF x NORM
+	short		fmul_res_operr	- tbl_fmul_op # INF x ZERO
+	short		fmul_inf_dst	- tbl_fmul_op # INF x INF
+	short		fmul_res_qnan	- tbl_fmul_op # INF x QNAN
+	short		fmul_inf_dst	- tbl_fmul_op # INF x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # INF x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x NORM
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x ZERO
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x INF
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x QNAN
+	short		fmul_res_qnan	- tbl_fmul_op # QNAN x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # QNAN x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_norm	- tbl_fmul_op # DENORM x NORM
+	short		fmul_zero	- tbl_fmul_op # DENORM x ZERO
+	short		fmul_inf_src	- tbl_fmul_op # DENORM x INF
+	short		fmul_res_qnan	- tbl_fmul_op # DENORM x QNAN
+	short		fmul_norm	- tbl_fmul_op # DENORM x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # DENORM x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x NORM
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x ZERO
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x INF
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x QNAN
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x DENORM
+	short		fmul_res_snan	- tbl_fmul_op # SNAN x SNAN
+	short		tbl_fmul_op	- tbl_fmul_op #
+	short		tbl_fmul_op	- tbl_fmul_op #
+
+fmul_res_operr:
+	bra.l		res_operr
+fmul_res_snan:
+	bra.l		res_snan
+fmul_res_qnan:
+	bra.l		res_qnan
+
+#
+# Multiply: (Zero x Zero) || (Zero x norm) || (Zero x denorm)
+#
+	global		fmul_zero		# global for fsglmul
+fmul_zero:
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fmul_zero_p		# result ZERO is pos.
+fmul_zero_n:
+	fmov.s		&0x80000000,%fp0	# load -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
+	rts
+fmul_zero_p:
+	fmov.s		&0x00000000,%fp0	# load +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# Multiply: (inf x inf) || (inf x norm) || (inf x denorm)
+#
+# Note: The j-bit for an infinity is a don't-care. However, to be
+# strictly compatible w/ the 68881/882, we make sure to return an
+# INF w/ the j-bit set if the input INF j-bit was set. Destination
+# INFs take priority.
+#
+	global		fmul_inf_dst		# global for fsglmul
+fmul_inf_dst:
+	fmovm.x		DST(%a1),&0x80		# return INF result in fp0
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fmul_inf_dst_p		# result INF is pos.
+fmul_inf_dst_n:
+	fabs.x		%fp0			# clear result sign
+	fneg.x		%fp0			# set result sign
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
+	rts
+fmul_inf_dst_p:
+	fabs.x		%fp0			# clear result sign
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set INF
+	rts
+
+	global		fmul_inf_src		# global for fsglmul
+fmul_inf_src:
+	fmovm.x		SRC(%a0),&0x80		# return INF result in fp0
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fmul_inf_dst_p		# result INF is pos.
+	bra.b		fmul_inf_dst_n
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fin(): emulates the fmove instruction				#
+#	fsin(): emulates the fsmove instruction				#
+#	fdin(): emulates the fdmove instruction				#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize mantissa for EXOP on denorm			#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan_1op() - return QNAN result				#
+#	res_snan_1op() - return SNAN result				#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round prec/mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Norms can be emulated w/ a regular fmove instruction. For	#
+# sgl/dbl, must scale exponent and perform an "fmove". Check to see	#
+# if the result would have overflowed/underflowed. If so, use unf_res()	#
+# or ovf_res() to return the default result. Also return EXOP if	#
+# exception is enabled. If no exception, return the default result.	#
+#	Unnorms don't pass through here.				#
+#									#
+#########################################################################
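+# threshold sketch (illustrative): scale_to_zero_src() returns
+# scale = 0x3fff - true_exp. for single precision the move underflows
+# when scale >= 0x3fff - 0x3f80 and overflows when scale < 0x3fff - 0x407e;
+# equality on the overflow bound means "may overflow, re-check after
+# rounding". the double precision bounds use 0x3c00 and 0x43fe instead.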
+
+	global		fsin
+fsin:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fin
+
+	global		fdin
+fdin:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl precision
+
+	global		fin
+fin:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	mov.b		STAG(%a6),%d1		# fetch src optype tag
+	bne.w		fin_not_norm		# optimize on non-norm input
+
+#
+# FP MOVE IN: NORMs and DENORMs ONLY!
+#
+fin_norm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.w		fin_not_ext		# no, so go handle dbl or sgl
+
+#
+# precision selected is extended. so...we cannot get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+	tst.b		SRC_EX(%a0)		# is the operand negative?
+	bpl.b		fin_norm_done		# no
+	bset		&neg_bit,FPSR_CC(%a6)	# yes, so set 'N' ccode bit
+fin_norm_done:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance(no inexactness!)
+#
+fin_denorm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.w		fin_not_ext		# no, so go handle dbl or sgl
+
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+	tst.b		SRC_EX(%a0)		# is the operand negative?
+	bpl.b		fin_denorm_done		# no
+	bset		&neg_bit,FPSR_CC(%a6)	# yes, so set 'N' ccode bit
+fin_denorm_done:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	btst		&unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+	bne.b		fin_denorm_unfl_ena	# yes
+	rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
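+# worked example (illustrative): if norm() shifts the mantissa up by 20
+# bits, the true exponent is -20 and the EXOP exponent stored below is
+# -20 + 0x6000 = 0x5fec.
+#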
+fin_denorm_unfl_ena:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	bsr.l		norm			# normalize result
+	neg.w		%d0			# new exponent = -(shft val)
+	addi.w		&0x6000,%d0		# add new bias to exponent
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch old sign,exp
+	andi.w		&0x8000,%d1		# keep old sign
+	andi.w		&0x7fff,%d0		# clear sign position
+	or.w		%d1,%d0			# concat old sign,new exp
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#
+# operand is to be rounded to single or double precision
+#
+fin_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.b		fin_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fin_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f80	# will move in underflow?
+	bge.w		fin_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407e	# will move in overflow?
+	beq.w		fin_sd_may_ovfl		# maybe; go check
+	blt.w		fin_sd_ovfl		# yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved into the fp reg file
+#
+fin_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# perform move
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fin_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exponent
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fin_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c00	# will move in underflow?
+	bge.w		fin_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43fe	# will move in overflow?
+	beq.w		fin_sd_may_ovfl		# maybe; go check
+	blt.w		fin_sd_ovfl		# yes; go handle overflow
+	bra.w		fin_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fin_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	tst.b		FP_SCR0_EX(%a6)		# is operand negative?
+	bpl.b		fin_sd_unfl_tst
+	bset		&neg_bit,FPSR_CC(%a6)	# set 'N' ccode bit
+
+# if underflow or inexact is enabled, then go calculate the EXOP first.
+fin_sd_unfl_tst:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fin_sd_unfl_ena		# yes
+
+fin_sd_unfl_dis:
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# unf_res may have set 'Z'
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow or inexact is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fin_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# subtract scale factor
+	andi.w		&0x8000,%d2		# extract old sign
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR1_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fin_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fin_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# perform move
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fin_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fin_sd_ovfl_ena		# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fin_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fin_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	sub.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fin_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fin_sd_may_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fmov.x		FP_SCR0(%a6),%fp0	# perform the move
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fin_sd_ovfl_tst		# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fin_sd_normal_exit
+
+##########################################################################
+
+#
+# operand is not a NORM: check its optype and branch accordingly
+#
+fin_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fin_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNANs
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNANs
+	beq.l		res_qnan_1op
+
+#
+# do the fmove in; at this point, only possible ops are ZERO and INF.
+# use fmov to determine ccodes.
+# prec:mode should be zero at this point but it won't affect answer anyways.
+#
+	fmov.x		SRC(%a0),%fp0		# do fmove in
+	fmov.l		%fpsr,%d0		# no exceptions possible
+	rol.l		&0x8,%d0		# put ccodes in lo byte
+	mov.b		%d0,FPSR_CC(%a6)	# insert correct ccodes
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fdiv(): emulates the fdiv instruction				#
+#	fsdiv(): emulates the fsdiv instruction				#
+#	fddiv(): emulates the fddiv instruction				#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res() - return default underflow result			#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a divide	#
+# instruction won't cause an exception. Use the regular fdiv to		#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
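+# as with fmul, a hedged C sketch (illustrative only; names are
+# hypothetical). the two scale factors are combined with opposite signs,
+# since a quotient's exponent is the difference of the operands':
+#
+#	int scale = scale_dst - scale_src;	/* pushed on the stack */
+#	if (scale <= tbl_fdiv_ovfl[prec])	/* may overflow: divide,
+#						   then re-check exponent
+#						   against tbl_fdiv_ovfl2 */ ;
+#	else if (scale >  tbl_fdiv_unfl[prec])	/* will underflow */ ;
+#	else if (scale == tbl_fdiv_unfl[prec])	/* may underflow  */ ;
+#	else					/* normal case     */ ;
+#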
+
+	align		0x10
+tbl_fdiv_unfl:
+	long		0x3fff - 0x0000		# ext_unfl
+	long		0x3fff - 0x3f81		# sgl_unfl
+	long		0x3fff - 0x3c01		# dbl_unfl
+
+tbl_fdiv_ovfl:
+	long		0x3fff - 0x7ffe		# ext overflow exponent
+	long		0x3fff - 0x407e		# sgl overflow exponent
+	long		0x3fff - 0x43fe		# dbl overflow exponent
+
+	global		fsdiv
+fsdiv:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fdiv
+
+	global		fddiv
+fddiv:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fdiv
+fdiv:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fdiv_not_norm		# optimize on non-norm input
+
+#
+# DIVIDE: NORMs and DENORMs ONLY!
+#
+fdiv_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# scale src exponent
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# scale dst exponent
+
+	neg.l		(%sp)			# SCALE FACTOR = scale2 - scale1
+	add.l		%d0,(%sp)
+
+	mov.w		2+L_SCR3(%a6),%d1	# fetch precision
+	lsr.b		&0x6,%d1		# shift to lo bits
+	mov.l		(%sp)+,%d0		# load S.F.
+	cmp.l		%d0,(tbl_fdiv_ovfl.b,%pc,%d1.w*4) # will result overflow?
+	ble.w		fdiv_may_ovfl		# result will overflow
+
+	cmp.l		%d0,(tbl_fdiv_unfl.w,%pc,%d1.w*4) # will result underflow?
+	beq.w		fdiv_may_unfl		# maybe
+	bgt.w		fdiv_unfl		# yes; go handle underflow
+
+fdiv_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# perform divide
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fdiv_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store result on stack
+	mov.l		%d2,-(%sp)		# store d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+tbl_fdiv_ovfl2:
+	long		0x7fff
+	long		0x407f
+	long		0x43ff
+
+fdiv_no_ovfl:
+	mov.l		(%sp)+,%d0		# restore scale factor
+	bra.b		fdiv_normal_exit
+
+fdiv_may_ovfl:
+	mov.l		%d0,-(%sp)		# save scale factor
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d0
+	fmov.l		&0x0,%fpcr
+
+	or.l		%d0,USER_FPSR(%a6)	# save INEX,N
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+	mov.w		(%sp),%d0		# fetch new exponent
+	add.l		&0xc,%sp		# clear result from stack
+	andi.l		&0x7fff,%d0		# strip sign
+	sub.l		(%sp),%d0		# add scale factor
+	cmp.l		%d0,(tbl_fdiv_ovfl2.b,%pc,%d1.w*4)
+	blt.b		fdiv_no_ovfl
+	mov.l		(%sp)+,%d0
+
+fdiv_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fdiv_ovfl_ena		# yes
+
+fdiv_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+fdiv_ovfl_ena:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fdiv_ovfl_ena_sd	# no, do sgl or dbl
+
+fdiv_ovfl_ena_cont:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1		# clear sign bit
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fdiv_ovfl_dis
+
+fdiv_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst operand
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# keep rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	bra.b		fdiv_ovfl_ena_cont
+
+fdiv_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fdiv_unfl_ena		# yes
+
+fdiv_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fdiv_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fdiv_unfl_ena_sd	# no, sgl or dbl
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fdiv_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp1	# execute divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exp
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fdiv_unfl_dis
+
+fdiv_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# use only rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fdiv_unfl_ena_cont
+
+#
+# the divide operation MAY underflow:
+#
+fdiv_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x1		# is |result| > 1.b?
+	fbgt.w		fdiv_normal_exit	# no; no underflow occurred
+	fblt.w		fdiv_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 1. but,
+# we don't know if the result was an underflow that rounded up to a 1
+# or a normalized number that rounded down to a 1. so, redo the entire
+# operation using RZ as the rounding mode to see what the pre-rounded
+# result is. this case should be relatively rare.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert RZ
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fdiv.x		FP_SCR0(%a6),%fp1	# execute divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x1		# is |result| < 1.b?
+	fbge.w		fdiv_normal_exit	# no; no underflow occurred
+	bra.w		fdiv_unfl		# yes; underflow occurred
+
+############################################################################
+
+#
+# Divide: inputs are not both normalized; what are they?
+#
+fdiv_not_norm:
+	mov.w		(tbl_fdiv_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fdiv_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fdiv_op:
+	short		fdiv_norm	- tbl_fdiv_op # NORM / NORM
+	short		fdiv_inf_load	- tbl_fdiv_op # NORM / ZERO
+	short		fdiv_zero_load	- tbl_fdiv_op # NORM / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # NORM / QNAN
+	short		fdiv_norm	- tbl_fdiv_op # NORM / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # NORM / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_zero_load	- tbl_fdiv_op # ZERO / NORM
+	short		fdiv_res_operr	- tbl_fdiv_op # ZERO / ZERO
+	short		fdiv_zero_load	- tbl_fdiv_op # ZERO / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # ZERO / QNAN
+	short		fdiv_zero_load	- tbl_fdiv_op # ZERO / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # ZERO / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_inf_dst	- tbl_fdiv_op # INF / NORM
+	short		fdiv_inf_dst	- tbl_fdiv_op # INF / ZERO
+	short		fdiv_res_operr	- tbl_fdiv_op # INF / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # INF / QNAN
+	short		fdiv_inf_dst	- tbl_fdiv_op # INF / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # INF / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / NORM
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / ZERO
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / QNAN
+	short		fdiv_res_qnan	- tbl_fdiv_op # QNAN / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # QNAN / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_norm	- tbl_fdiv_op # DENORM / NORM
+	short		fdiv_inf_load	- tbl_fdiv_op # DENORM / ZERO
+	short		fdiv_zero_load	- tbl_fdiv_op # DENORM / INF
+	short		fdiv_res_qnan	- tbl_fdiv_op # DENORM / QNAN
+	short		fdiv_norm	- tbl_fdiv_op # DENORM / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # DENORM / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / NORM
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / ZERO
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / INF
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / QNAN
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / DENORM
+	short		fdiv_res_snan	- tbl_fdiv_op # SNAN / SNAN
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+	short		tbl_fdiv_op	- tbl_fdiv_op #
+
+fdiv_res_qnan:
+	bra.l		res_qnan
+fdiv_res_snan:
+	bra.l		res_snan
+fdiv_res_operr:
+	bra.l		res_operr
+
+	global		fdiv_zero_load		# global for fsgldiv
+fdiv_zero_load:
+	mov.b		SRC_EX(%a0),%d0		# result sign is exclusive
+	mov.b		DST_EX(%a1),%d1		# or of input signs.
+	eor.b		%d0,%d1
+	bpl.b		fdiv_zero_load_p	# result is positive
+	fmov.s		&0x80000000,%fp0	# load a -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set Z/N
+	rts
+fdiv_zero_load_p:
+	fmov.s		&0x00000000,%fp0	# load a +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# The destination was In Range and the source was a ZERO. The result,
+# therefore, is an INF w/ the proper sign.
+# So, determine the sign and return a new INF (w/ the j-bit cleared).
+#
+	global		fdiv_inf_load		# global for fsgldiv
+fdiv_inf_load:
+	ori.w		&dz_mask+adz_mask,2+USER_FPSR(%a6) # set DZ/ADZ
+	mov.b		SRC_EX(%a0),%d0		# load both signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bpl.b		fdiv_inf_load_p		# result is positive
+	fmov.s		&0xff800000,%fp0	# make result -INF
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
+	rts
+fdiv_inf_load_p:
+	fmov.s		&0x7f800000,%fp0	# make result +INF
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set INF
+	rts
+
+#
+# The destination was an INF w/ an In Range or ZERO source, the result is
+# an INF w/ the proper sign.
+# The 68881/882 returns the destination INF w/ the new sign(if the j-bit of the
+# dst INF is set, then the j-bit of the result INF is also set).
+#
+	global		fdiv_inf_dst		# global for fsgldiv
+fdiv_inf_dst:
+	mov.b		DST_EX(%a1),%d0		# load both signs
+	mov.b		SRC_EX(%a0),%d1
+	eor.b		%d0,%d1
+	bpl.b		fdiv_inf_dst_p		# result is positive
+
+	fmovm.x		DST(%a1),&0x80		# return result in fp0
+	fabs.x		%fp0			# clear sign bit
+	fneg.x		%fp0			# set sign bit
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fdiv_inf_dst_p:
+	fmovm.x		DST(%a1),&0x80		# return result in fp0
+	fabs.x		%fp0			# return positive INF
+	mov.b		&inf_bmask,FPSR_CC(%a6) # set INF
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fneg(): emulates the fneg instruction				#
+#	fsneg(): emulates the fsneg instruction				#
+#	fdneg(): emulates the fdneg instruction				#
+#									#
+# XREF ****************************************************************	#
+#	norm() - normalize a denorm to provide EXOP			#
+#	scale_to_zero_src() - scale sgl/dbl source exponent		#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan_1op() - return QNAN result				#
+#	res_snan_1op() - return SNAN result				#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, zeroes, and infinities as special cases. Separate	#
+# norms/denorms into ext/sgl/dbl precisions. Extended precision can be	#
+# emulated by simply flipping the sign bit. Sgl/dbl operands must be scaled	#
+# and an actual fneg performed to see if overflow/underflow would have	#
+# occurred. If so, return default underflow/overflow result. Else,	#
+# scale the result exponent and return result. FPSR gets set based on	#
+# the result value.							#
+#									#
+#########################################################################
+
+	global		fsneg
+fsneg:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fneg
+
+	global		fdneg
+fdneg:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fneg
+fneg:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+	mov.b		STAG(%a6),%d1
+	bne.w		fneg_not_norm		# optimize on non-norm input
+
+#
+# NEGATE SIGN : norms and denorms ONLY!
+#
+fneg_norm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.w		fneg_not_ext		# no; go handle sgl or dbl
+
+#
+# precision selected is extended. so...we cannot get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	eori.w		&0x8000,%d0		# negate sign
+	bpl.b		fneg_norm_load		# sign is positive
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+fneg_norm_load:
+	mov.w		%d0,FP_SCR0_EX(%a6)
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance(no inexactness!)
+#
+fneg_denorm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fneg_not_ext		# no; go handle sgl or dbl
+
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	eori.w		&0x8000,%d0		# negate sign
+	bpl.b		fneg_denorm_done	# no
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# yes, set 'N' ccode bit
+fneg_denorm_done:
+	mov.w		%d0,FP_SCR0_EX(%a6)
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+
+	btst		&unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+	bne.b		fneg_ext_unfl_ena	# yes
+	rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
+fneg_ext_unfl_ena:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	bsr.l		norm			# normalize result
+	neg.w		%d0			# new exponent = -(shft val)
+	addi.w		&0x6000,%d0		# add new bias to exponent
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch old sign,exp
+	andi.w		&0x8000,%d1		# keep old sign
+	andi.w		&0x7fff,%d0		# clear sign position
+	or.w		%d1,%d0			# concat old sign, new exponent
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
+
+#
+# operand is either single or double
+#
+fneg_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.b		fneg_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fneg_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f80	# will move in underflow?
+	bge.w		fneg_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407e	# will move in overflow?
+	beq.w		fneg_sd_may_ovfl	# maybe; go check
+	blt.w		fneg_sd_ovfl		# yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fneg_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fneg.x		FP_SCR0(%a6),%fp0	# perform negation
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fneg_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load sgn,exp
+	mov.w		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fneg_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c00	# will move in underflow?
+	bge.b		fneg_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43fe	# will move in overflow?
+	beq.w		fneg_sd_may_ovfl	# maybe; go check
+	blt.w		fneg_sd_ovfl		# yes; go handle overflow
+	bra.w		fneg_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fneg_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	eori.b		&0x80,FP_SCR0_EX(%a6)	# negate sign
+	bpl.b		fneg_sd_unfl_tst
+	bset		&neg_bit,FPSR_CC(%a6)	# set 'N' ccode bit
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+fneg_sd_unfl_tst:
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fneg_sd_unfl_ena	# yes
+
+fneg_sd_unfl_dis:
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# unf_res may have set 'Z'
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow or inexact is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fneg_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR1_EX(%a6)	# insert new exp
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fneg_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fneg_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fneg.x		FP_SCR0(%a6),%fp0	# perform negation
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fneg_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fneg_sd_ovfl_ena	# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fneg_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fneg_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fneg_sd_ovfl_dis
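+#
+# (mirror image of the underflow EXOP: here 0x6000 is subtracted mod
+# 0x8000, e.g. a true exponent of 0x8100 yields
+# (0x8100-0x6000)&0x7fff = 0x2100 in the EXOP.)
+#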
+
+#
+# the move in MAY overflow. so...
+#
+fneg_sd_may_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fneg.x		FP_SCR0(%a6),%fp0	# perform negation
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fneg_sd_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fneg_sd_normal_exit
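+#
+# (why the fcmp against 2 works: the scaled operand has exponent
+# 0x3fff, i.e. magnitude in [1.0,2.0), so the rounded result can only
+# reach 2.0 if rounding carried out of the mantissa and bumped the
+# exponent -- exactly the case that pushes the true result past the
+# largest norm. hence |result| >= 2.0 means overflow occurred.)
+#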
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fneg_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fneg_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNAN
+	beq.l		res_qnan_1op
+
+#
+# do the fneg; at this point, only possible ops are ZERO and INF.
+# use fneg to determine ccodes.
+# prec:mode should be zero at this point but it won't affect answer anyway.
+#
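+# (the rol.l below works because the ccodes live in the top byte of the
+# FPSR: e.g. a negative result gives FPSR = 0x08000000, and rotating
+# left by 8 wraps that into d0 = 0x00000008, which is neg_bmask.)
+#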
+	fneg.x		SRC_EX(%a0),%fp0	# do fneg
+	fmov.l		%fpsr,%d0
+	rol.l		&0x8,%d0		# put ccodes in lo byte
+	mov.b		%d0,FPSR_CC(%a6)	# insert correct ccodes
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	ftst(): emulates the ftst instruction				#
+#									#
+# XREF ****************************************************************	#
+#	res{s,q}nan_1op() - set NAN result for monadic instruction	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	none								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Check the source operand tag (STAG) and set the FPSR according	#
+# to the operand type and sign.						#
+#									#
+#########################################################################
+
+	global		ftst
+ftst:
+	mov.b		STAG(%a6),%d1
+	bne.b		ftst_not_norm		# optimize on non-norm input
+
+#
+# Norm:
+#
+ftst_norm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_norm_m		# yes
+	rts
+ftst_norm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#
+# input is not normalized; what is it?
+#
+ftst_not_norm:
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		ftst_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		ftst_inf
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNAN
+	beq.l		res_qnan_1op
+
+#
+# Denorm:
+#
+ftst_denorm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_denorm_m		# yes
+	rts
+ftst_denorm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#
+# Infinity:
+#
+ftst_inf:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_inf_m		# yes
+ftst_inf_p:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+ftst_inf_m:
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'I','N' ccode bits
+	rts
+
+#
+# Zero:
+#
+ftst_zero:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.b		ftst_zero_m		# yes
+ftst_zero_p:
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+ftst_zero_m:
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set 'Z','N' ccode bits
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fint(): emulates the fint instruction				#
+#									#
+# XREF ****************************************************************	#
+#	res_{s,q}nan_1op() - set NAN result for monadic operation	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round precision/mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Separate according to operand type. Unnorms don't pass through	#
+# here. For norms, load the rounding mode/prec, execute a "fint", then	#
+# store the resulting FPSR bits.					#
+#	For denorms, force the j-bit to a one and do the same as for	#
+# norms. Denorms are so low that the answer will either be a zero or a	#
+# one.									#
+#	For zeroes/infs/NANs, return the same while setting the FPSR	#
+# as appropriate.							#
+#									#
+#########################################################################
+
+	global		fint
+fint:
+	mov.b		STAG(%a6),%d1
+	bne.b		fint_not_norm		# optimize on non-norm input
+
+#
+# Norm:
+#
+fint_norm:
+	andi.b		&0x30,%d0		# set prec = ext
+
+	fmov.l		%d0,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fint.x		SRC(%a0),%fp0		# execute fint
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d0		# save FPSR
+	or.l		%d0,USER_FPSR(%a6)	# set exception bits
+
+	rts
+
+#
+# input is not normalized; what is it?
+#
+fint_not_norm:
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		fint_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fint_inf
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.b		fint_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	bra.l		res_qnan_1op		# weed out QNAN
+
+#
+# Denorm:
+#
+# for DENORMs, the result will be either (+/-)ZERO or (+/-)1.
+# also, the INEX2 and AINEX exception bits will be set.
+# so, we could either set these manually or force the DENORM
+# to a very small NORM and ship it to the NORM routine.
+# I do the latter.
+#
+fint_denorm:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
+	mov.b		&0x80,FP_SCR0_HI(%a6)	# force DENORM ==> small NORM
+	lea		FP_SCR0(%a6),%a0
+	bra.b		fint_norm
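+#
+# (what the two moves above build: the copied sign/exponent word has a
+# zero exponent field, and writing 0x80 into the high mantissa byte
+# sets the j-bit, so FP_SCR0 becomes (+/-)1.0 x 2^-16383 -- the
+# smallest extended norm. this keeps the sign and the "tiny" property
+# of the DENORM while letting the normal fint path do the rounding.)
+#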
+
+#
+# Zero:
+#
+fint_zero:
+	tst.b		SRC_EX(%a0)		# is ZERO negative?
+	bmi.b		fint_zero_m		# yes
+fint_zero_p:
+	fmov.s		&0x00000000,%fp0	# return +ZERO in fp0
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fint_zero_m:
+	fmov.s		&0x80000000,%fp0	# return -ZERO in fp0
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+	rts
+
+#
+# Infinity:
+#
+fint_inf:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	tst.b		SRC_EX(%a0)		# is INF negative?
+	bmi.b		fint_inf_m		# yes
+fint_inf_p:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+fint_inf_m:
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fintrz(): emulates the fintrz instruction			#
+#									#
+# XREF ****************************************************************	#
+#	res_{s,q}nan_1op() - set NAN result for monadic operation	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = round precision/mode					#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Separate according to operand type. Unnorms don't pass through	#
+# here. For norms, load the rounding mode/prec, execute a "fintrz",	#
+# then store the resulting FPSR bits.					#
+#	For denorms, force the j-bit to a one and do the same as for	#
+# norms. Denorms are so low that the answer will either be a zero or a	#
+# one.									#
+#	For zeroes/infs/NANs, return the same while setting the FPSR	#
+# as appropriate.							#
+#									#
+#########################################################################
+
+	global		fintrz
+fintrz:
+	mov.b		STAG(%a6),%d1
+	bne.b		fintrz_not_norm		# optimize on non-norm input
+
+#
+# Norm:
+#
+fintrz_norm:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fintrz.x	SRC(%a0),%fp0		# execute fintrz
+
+	fmov.l		%fpsr,%d0		# save FPSR
+	or.l		%d0,USER_FPSR(%a6)	# set exception bits
+
+	rts
+
+#
+# input is not normalized; what is it?
+#
+fintrz_not_norm:
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		fintrz_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fintrz_inf
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.b		fintrz_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	bra.l		res_qnan_1op		# weed out QNAN
+
+#
+# Denorm:
+#
+# for DENORMs, the result will be (+/-)ZERO.
+# also, the INEX2 and AINEX exception bits will be set.
+# so, we could either set these manually or force the DENORM
+# to a very small NORM and ship it to the NORM routine.
+# I do the latter.
+#
+fintrz_denorm:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
+	mov.b		&0x80,FP_SCR0_HI(%a6)	# force DENORM ==> small NORM
+	lea		FP_SCR0(%a6),%a0
+	bra.b		fintrz_norm
+
+#
+# Zero:
+#
+fintrz_zero:
+	tst.b		SRC_EX(%a0)		# is ZERO negative?
+	bmi.b		fintrz_zero_m		# yes
+fintrz_zero_p:
+	fmov.s		&0x00000000,%fp0	# return +ZERO in fp0
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fintrz_zero_m:
+	fmov.s		&0x80000000,%fp0	# return -ZERO in fp0
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+	rts
+
+#
+# Infinity:
+#
+fintrz_inf:
+	fmovm.x		SRC(%a0),&0x80		# return result in fp0
+	tst.b		SRC_EX(%a0)		# is INF negative?
+	bmi.b		fintrz_inf_m		# yes
+fintrz_inf_p:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+fintrz_inf_m:
+	mov.b		&inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fabs():  emulates the fabs instruction				#
+#	fsabs(): emulates the fsabs instruction				#
+#	fdabs(): emulates the fdabs instruction				#
+#									#
+# XREF **************************************************************** #
+#	norm() - normalize denorm mantissa to provide EXOP		#
+#	scale_to_zero_src() - make exponent = 0; get scale factor	#
+#	unf_res() - calculate underflow result				#
+#	ovf_res() - calculate overflow result				#
+#	res_{s,q}nan_1op() - set NAN result for monadic operation	#
+#									#
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision source operand		#
+#	d0 = rnd precision/mode						#
+#									#
+# OUTPUT ************************************************************** #
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Simply clear sign for extended precision norm. Ext prec denorm	#
+# gets an EXOP created for it since it's an underflow.			#
+#	Double and single precision can overflow and underflow. First,	#
+# scale the operand such that the exponent is zero. Perform an "fabs"	#
+# using the correct rnd mode/prec. Check to see if the original		#
+# exponent would take an exception. If so, use unf_res() or ovf_res()	#
+# to calculate the default result. Also, create the EXOP for the	#
+# exceptional case. If no exception should occur, insert the correct	#
+# result exponent and return.						#
+#	Unnorms don't pass through here.				#
+#									#
+#########################################################################
+
+	global		fsabs
+fsabs:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fabs
+
+	global		fdabs
+fdabs:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl precision
+
+	global		fabs
+fabs:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+	mov.b		STAG(%a6),%d1
+	bne.w		fabs_not_norm		# optimize on non-norm input
+
+#
+# ABSOLUTE VALUE: norms and denorms ONLY!
+#
+fabs_norm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fabs_not_ext		# no; go handle sgl or dbl
+
+#
+# precision selected is extended. so...we can not get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d1
+	bclr		&15,%d1			# force absolute value
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert exponent
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance(no inexactness!)
+#
+fabs_denorm:
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fabs_not_ext		# no
+
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	mov.w		SRC_EX(%a0),%d0
+	bclr		&15,%d0			# clear sign
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert exponent
+
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+
+	btst		&unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+	bne.b		fabs_ext_unfl_ena
+	rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
+fabs_ext_unfl_ena:
+	lea		FP_SCR0(%a6),%a0	# pass: ptr to operand
+	bsr.l		norm			# normalize result
+	neg.w		%d0			# new exponent = -(shft val)
+	addi.w		&0x6000,%d0		# add new bias to exponent
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch old sign,exp
+	andi.w		&0x8000,%d1		# keep old sign
+	andi.w		&0x7fff,%d0		# clear sign position
+	or.w		%d1,%d0			# concat old sign, new exponent
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	rts
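+#
+# (norm() hands back the shift count in d0, so the normalized denorm's
+# true exponent is -(shift count); negating and adding the 0x6000 bias
+# rebuilds the EXOP exponent. e.g. a shift of 3 gives 0x6000-0x3 =
+# 0x5ffd.)
+#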
+
+#
+# operand is either single or double
+#
+fabs_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.b		fabs_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fabs_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f80	# will move in underflow?
+	bge.w		fabs_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407e	# will move in overflow?
+	beq.w		fabs_sd_may_ovfl	# maybe; go check
+	blt.w		fabs_sd_ovfl		# yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fabs_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fabs_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load sgn,exp
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# subtract scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fabs_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c00	# will move in underflow?
+	bge.b		fabs_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43fe	# will move in overflow?
+	beq.w		fabs_sd_may_ovfl	# maybe; go check
+	blt.w		fabs_sd_ovfl		# yes; go handle overflow
+	bra.w		fabs_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fabs_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	bclr		&0x7,FP_SCR0_EX(%a6)	# force absolute value
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fabs_sd_unfl_ena	# yes
+
+fabs_sd_unfl_dis:
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set possible 'Z' ccode
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fabs_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat new sign,new exp
+	mov.w		%d1,FP_SCR1_EX(%a6)	# insert new exp
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fabs_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fabs_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fabs_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fabs_sd_ovfl_ena	# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fabs_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fabs_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fabs_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fabs_sd_may_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fabs.x		FP_SCR0(%a6),%fp0	# perform absolute
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fabs_sd_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fabs_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fabs_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fabs_denorm
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	cmpi.b		%d1,&QNAN		# weed out QNAN
+	beq.l		res_qnan_1op
+
+	fabs.x		SRC(%a0),%fp0		# force absolute value
+
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fabs_inf
+fabs_zero:
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fabs_inf:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fcmp(): fp compare op routine					#
+#									#
+# XREF ****************************************************************	#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = round prec/mode						#
+#									#
+# OUTPUT ************************************************************** #
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs and denorms as special cases. For everything else,	#
+# just use the actual fcmp instruction to produce the correct condition	#
+# codes.								#
+#									#
+#########################################################################
+
+	global		fcmp
+fcmp:
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1
+	bne.b		fcmp_not_norm		# optimize on non-norm input
+
+#
+# COMPARE FP OPs : NORMs, ZEROs, INFs, and "corrected" DENORMs
+#
+fcmp_norm:
+	fmovm.x		DST(%a1),&0x80		# load dst op
+
+	fcmp.x		%fp0,SRC(%a0)		# do compare
+
+	fmov.l		%fpsr,%d0		# save FPSR
+	rol.l		&0x8,%d0		# extract ccode bits
+	mov.b		%d0,FPSR_CC(%a6)	# set ccode bits(no exc bits are set)
+
+	rts
+
+#
+# fcmp: inputs are not both normalized; what are they?
+#
+fcmp_not_norm:
+	mov.w		(tbl_fcmp_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fcmp_op.b,%pc,%d1.w*1)
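+#
+# (dispatch mechanics: d1 = (DTAG << 3) | STAG indexes an 8-wide table
+# -- 6 tag types plus 2 pad entries per row. the mov.w fetches a 16-bit
+# offset (hence the *2 scaling) and the jmp adds it back to the table
+# base. e.g. with the tag ordering the table implies (NORM=0, ZERO=1,
+# INF=2, ...), a ZERO dst and INF src give d1 = 0x0a, which lands on
+# the "ZERO - INF" slot below and dispatches to fcmp_norm.)
+#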
+
+	swbeg		&48
+tbl_fcmp_op:
+	short		fcmp_norm	- tbl_fcmp_op # NORM - NORM
+	short		fcmp_norm	- tbl_fcmp_op # NORM - ZERO
+	short		fcmp_norm	- tbl_fcmp_op # NORM - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # NORM - QNAN
+	short		fcmp_nrm_dnrm	- tbl_fcmp_op # NORM - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # NORM - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_norm	- tbl_fcmp_op # ZERO - NORM
+	short		fcmp_norm	- tbl_fcmp_op # ZERO - ZERO
+	short		fcmp_norm	- tbl_fcmp_op # ZERO - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # ZERO - QNAN
+	short		fcmp_dnrm_s	- tbl_fcmp_op # ZERO - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # ZERO - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_norm	- tbl_fcmp_op # INF - NORM
+	short		fcmp_norm	- tbl_fcmp_op # INF - ZERO
+	short		fcmp_norm	- tbl_fcmp_op # INF - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # INF - QNAN
+	short		fcmp_dnrm_s	- tbl_fcmp_op # INF - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # INF - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - NORM
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - ZERO
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - QNAN
+	short		fcmp_res_qnan	- tbl_fcmp_op # QNAN - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # QNAN - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_dnrm_nrm	- tbl_fcmp_op # DENORM - NORM
+	short		fcmp_dnrm_d	- tbl_fcmp_op # DENORM - ZERO
+	short		fcmp_dnrm_d	- tbl_fcmp_op # DENORM - INF
+	short		fcmp_res_qnan	- tbl_fcmp_op # DENORM - QNAN
+	short		fcmp_dnrm_sd	- tbl_fcmp_op # DENORM - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # DENORM - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - NORM
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - ZERO
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - INF
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - QNAN
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - DENORM
+	short		fcmp_res_snan	- tbl_fcmp_op # SNAN - SNAN
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+	short		tbl_fcmp_op	- tbl_fcmp_op #
+
+# unlike all other functions for QNAN and SNAN, fcmp does NOT set the
+# 'N' bit for a negative QNAN or SNAN input so we must squelch it here.
+fcmp_res_qnan:
+	bsr.l		res_qnan
+	andi.b		&0xf7,FPSR_CC(%a6)
+	rts
+fcmp_res_snan:
+	bsr.l		res_snan
+	andi.b		&0xf7,FPSR_CC(%a6)
+	rts
+
+#
+# DENORMs are a little more difficult.
+# If you have 2 DENORMs, then you can just force the j-bit to a one
+# and use the fcmp_norm routine.
+# If you have a DENORM and an INF or ZERO, just force the DENORM's j-bit to a one
+# and use the fcmp_norm routine.
+# If you have a DENORM and a NORM with opposite signs, then use fcmp_norm, also.
+# But with a DENORM and a NORM of the same sign, the neg bit is set if the
+# (1) signs are (+) and the DENORM is the dst or
+# (2) signs are (-) and the DENORM is the src
+#
+
+fcmp_dnrm_s:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),%d0
+	bset		&31,%d0			# DENORM src; make into small norm
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a0
+	bra.w		fcmp_norm
+
+fcmp_dnrm_d:
+	mov.l		DST_EX(%a1),FP_SCR0_EX(%a6)
+	mov.l		DST_HI(%a1),%d0
+	bset		&31,%d0			# DENORM dst; make into small norm
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR0_LO(%a6)
+	lea		FP_SCR0(%a6),%a1
+	bra.w		fcmp_norm
+
+fcmp_dnrm_sd:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		DST_HI(%a1),%d0
+	bset		&31,%d0			# DENORM dst; make into small norm
+	mov.l		%d0,FP_SCR1_HI(%a6)
+	mov.l		SRC_HI(%a0),%d0
+	bset		&31,%d0			# DENORM src; make into small norm
+	mov.l		%d0,FP_SCR0_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	lea		FP_SCR1(%a6),%a1
+	lea		FP_SCR0(%a6),%a0
+	bra.w		fcmp_norm
+
+fcmp_nrm_dnrm:
+	mov.b		SRC_EX(%a0),%d0		# determine if like signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bmi.w		fcmp_dnrm_s
+
+# signs are the same, so must determine the answer ourselves.
+	tst.b		%d0			# is src op negative?
+	bmi.b		fcmp_nrm_dnrm_m		# yes
+	rts
+fcmp_nrm_dnrm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+fcmp_dnrm_nrm:
+	mov.b		SRC_EX(%a0),%d0		# determine if like signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bmi.w		fcmp_dnrm_d
+
+# signs are the same, so must determine the answer ourselves.
+	tst.b		%d0			# is src op negative?
+	bpl.b		fcmp_dnrm_nrm_m		# no
+	rts
+fcmp_dnrm_nrm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6)	# set 'N' ccode bit
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsglmul(): emulates the fsglmul instruction			#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res4() - return default underflow result for sglop		#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a multiply	#
+# instruction won't cause an exception. Use the regular fsglmul to	#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
+
+	global		fsglmul
+fsglmul:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1
+
+	bne.w		fsglmul_not_norm	# optimize on non-norm input
+
+fsglmul_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# scale exponent
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# scale dst exponent
+
+	add.l		(%sp)+,%d0		# SCALE_FACTOR = scale1 + scale2
+
+	cmpi.l		%d0,&0x3fff-0x7ffe	# would result ovfl?
+	beq.w		fsglmul_may_ovfl	# result may rnd to overflow
+	blt.w		fsglmul_ovfl		# result will overflow
+
+	cmpi.l		%d0,&0x3fff+0x0001	# would result unfl?
+	beq.w		fsglmul_may_unfl	# result may rnd to no unfl
+	bgt.w		fsglmul_unfl		# result will underflow
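+#
+# (with both operands scaled to exponent 0x3fff, the true biased
+# exponent of the product is 0x3fff - d0, plus one more if the
+# mantissa multiply carries -- mantissas in [1,2) multiply into [1,4).
+# d0 < 0x3fff-0x7ffe therefore puts the true exponent past the
+# extended maximum of 0x7ffe, and d0 > 0x3fff+0x0001 drops it below
+# the norm range. the beq boundary cases hinge on that carry, which is
+# why they branch off to inspect the actual rounded result.)
+#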
+
+fsglmul_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsglmul_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+fsglmul_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsglmul_ovfl_tst:
+
+# save setting this until now because this is where fsglmul_may_ovfl may jump in
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsglmul_ovfl_ena	# yes
+
+fsglmul_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	andi.b		&0x30,%d0		# force prec = ext
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+fsglmul_ovfl_ena:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# subtract scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fsglmul_ovfl_dis
+
+fsglmul_may_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| >= 2.b?
+	fbge.w		fsglmul_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fsglmul_normal_exit
+
+fsglmul_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsglmul_unfl_ena	# yes
+
+fsglmul_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res4		# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' bit may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fsglmul_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp1	# execute sgl multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fsglmul_unfl_dis
+
+fsglmul_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp0	# execute sgl multiply
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x2		# is |result| > 2.b?
+	fbgt.w		fsglmul_normal_exit	# no; no underflow occurred
+	fblt.w		fsglmul_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 2. but,
+# we don't know if the result was an underflow that rounded up to a 2 or
+# a normalized number that rounded down to a 2. so, redo the entire operation
+# using RZ as the rounding mode to see what the pre-rounded result is.
+# this case should be relatively rare.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert RZ
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsglmul.x	FP_SCR0(%a6),%fp1	# execute sgl multiply
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x2		# is |result| < 2.b?
+	fbge.w		fsglmul_normal_exit	# no; no underflow occurred
+	bra.w		fsglmul_unfl		# yes, underflow occurred
+
+##############################################################################
+
+#
+# Single Precision Multiply: inputs are not both normalized; what are they?
+#
+fsglmul_not_norm:
+	mov.w		(tbl_fsglmul_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fsglmul_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fsglmul_op:
+	short		fsglmul_norm		- tbl_fsglmul_op # NORM x NORM
+	short		fsglmul_zero		- tbl_fsglmul_op # NORM x ZERO
+	short		fsglmul_inf_src		- tbl_fsglmul_op # NORM x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # NORM x QNAN
+	short		fsglmul_norm		- tbl_fsglmul_op # NORM x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # NORM x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_zero		- tbl_fsglmul_op # ZERO x NORM
+	short		fsglmul_zero		- tbl_fsglmul_op # ZERO x ZERO
+	short		fsglmul_res_operr	- tbl_fsglmul_op # ZERO x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # ZERO x QNAN
+	short		fsglmul_zero		- tbl_fsglmul_op # ZERO x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # ZERO x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_inf_dst		- tbl_fsglmul_op # INF x NORM
+	short		fsglmul_res_operr	- tbl_fsglmul_op # INF x ZERO
+	short		fsglmul_inf_dst		- tbl_fsglmul_op # INF x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # INF x QNAN
+	short		fsglmul_inf_dst		- tbl_fsglmul_op # INF x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # INF x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x NORM
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x ZERO
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x QNAN
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # QNAN x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # QNAN x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_norm		- tbl_fsglmul_op # DENORM x NORM
+	short		fsglmul_zero		- tbl_fsglmul_op # DENORM x ZERO
+	short		fsglmul_inf_src		- tbl_fsglmul_op # DENORM x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # DENORM x QNAN
+	short		fsglmul_norm		- tbl_fsglmul_op # DENORM x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # DENORM x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x NORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x ZERO
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x INF
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x QNAN
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # SNAN x SNAN
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+	short		tbl_fsglmul_op		- tbl_fsglmul_op #
+
+fsglmul_res_operr:
+	bra.l		res_operr
+fsglmul_res_snan:
+	bra.l		res_snan
+fsglmul_res_qnan:
+	bra.l		res_qnan
+fsglmul_zero:
+	bra.l		fmul_zero
+fsglmul_inf_src:
+	bra.l		fmul_inf_src
+fsglmul_inf_dst:
+	bra.l		fmul_inf_dst
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsgldiv(): emulates the fsgldiv instruction			#
+#									#
+# XREF ****************************************************************	#
+#	scale_to_zero_src() - scale src exponent to zero		#
+#	scale_to_zero_dst() - scale dst exponent to zero		#
+#	unf_res4() - return default underflow result for sglop		#
+#	ovf_res() - return default overflow result			#
+#	res_qnan() - return QNAN result					#
+#	res_snan() - return SNAN result					#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a divide	#
+# instruction won't cause an exception. Use the regular fsgldiv to	#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
+
+	global		fsgldiv
+fsgldiv:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fsgldiv_not_norm	# optimize on non-norm input
+
+#
+# DIVIDE: NORMs and DENORMs ONLY!
+#
+fsgldiv_norm:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_to_zero_src	# calculate scale factor 1
+	mov.l		%d0,-(%sp)		# save scale factor 1
+
+	bsr.l		scale_to_zero_dst	# calculate scale factor 2
+
+	neg.l		(%sp)			# S.F. = scale2 - scale1
+	add.l		%d0,(%sp)
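+#
+# (derivation of the sign flip: for a quotient the true biased exponent
+# is e_dst - e_src + 0x3fff. substituting the scaled forms
+# e_dst = 0x3fff - scale2 and e_src = 0x3fff - scale1 gives
+# 0x3fff - (scale2 - scale1), so scale2 - scale1 is exactly the factor
+# subtracted from the scaled result's exponent later on.)
+#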
+
+	mov.w		2+L_SCR3(%a6),%d1	# fetch precision,mode
+	lsr.b		&0x6,%d1
+	mov.l		(%sp)+,%d0
+	cmpi.l		%d0,&0x3fff-0x7ffe
+	ble.w		fsgldiv_may_ovfl
+
+	cmpi.l		%d0,&0x3fff-0x0000	# will result underflow?
+	beq.w		fsgldiv_may_unfl	# maybe
+	bgt.w		fsgldiv_unfl		# yes; go handle underflow
+
+fsgldiv_normal:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# perform sgl divide
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsgldiv_normal_exit:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store result on stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# load {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+fsgldiv_may_ovfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# execute divide
+
+	fmov.l		%fpsr,%d1
+	fmov.l		&0x0,%fpcr
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX,N
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+	mov.w		(%sp),%d1		# fetch new exponent
+	add.l		&0xc,%sp		# discard result from stack
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# subtract scale factor
+	cmp.l		%d1,&0x7fff		# did divide overflow?
+	blt.b		fsgldiv_normal_exit
+
+fsgldiv_ovfl_tst:
+	or.w		&ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsgldiv_ovfl_ena	# yes
+
+fsgldiv_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	andi.b		&0x30,%d0		# kill precision
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+fsgldiv_ovfl_ena:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# move result to stack
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	subi.l		&0x6000,%d1		# subtract new bias
+	andi.w		&0x7fff,%d1		# clear ms bit
+	or.w		%d2,%d1			# concat old sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fsgldiv_ovfl_dis
+
+fsgldiv_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# execute sgl divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsgldiv_unfl_ena	# yes
+
+fsgldiv_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res4		# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' bit may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# UNFL is enabled.
+#
+fsgldiv_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp1	# execute sgl divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add bias
+	andi.w		&0x7fff,%d1		# clear top bit
+	or.w		%d2,%d1			# concat old sign, new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.b		fsgldiv_unfl_dis
+
+#
+# the divide operation MAY underflow:
+#
+fsgldiv_may_unfl:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp0	# execute sgl divide
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fabs.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x1		# is |result| > 1.b?
+	fbgt.w		fsgldiv_normal_exit	# no; no underflow occurred
+	fblt.w		fsgldiv_unfl		# yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 1. but,
+# we don't know if the result was an underflow that rounded up to a 1
+# or a normalized number that rounded down to a 1. so, redo the entire
+# operation using RZ as the rounding mode to see what the pre-rounded
+# result is. this case should be relatively rare.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into %fp1
+
+	clr.l		%d1			# clear scratch register
+	ori.b		&rz_mode*0x10,%d1	# force RZ rnd mode
+
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsgldiv.x	FP_SCR0(%a6),%fp1	# execute sgl divide
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fabs.x		%fp1			# make absolute value
+	fcmp.b		%fp1,&0x1		# is |result| < 1.b?
+	fbge.w		fsgldiv_normal_exit	# no; no underflow occurred
+	bra.w		fsgldiv_unfl		# yes; underflow occurred
+
+############################################################################
+
+#
+# Divide: inputs are not both normalized; what are they?
+#
+fsgldiv_not_norm:
+	mov.w		(tbl_fsgldiv_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fsgldiv_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fsgldiv_op:
+	short		fsgldiv_norm		- tbl_fsgldiv_op # NORM / NORM
+	short		fsgldiv_inf_load	- tbl_fsgldiv_op # NORM / ZERO
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # NORM / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # NORM / QNAN
+	short		fsgldiv_norm		- tbl_fsgldiv_op # NORM / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # NORM / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # ZERO / NORM
+	short		fsgldiv_res_operr	- tbl_fsgldiv_op # ZERO / ZERO
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # ZERO / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # ZERO / QNAN
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # ZERO / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # ZERO / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_inf_dst		- tbl_fsgldiv_op # INF / NORM
+	short		fsgldiv_inf_dst		- tbl_fsgldiv_op # INF / ZERO
+	short		fsgldiv_res_operr	- tbl_fsgldiv_op # INF / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # INF / QNAN
+	short		fsgldiv_inf_dst		- tbl_fsgldiv_op # INF / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # INF / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / NORM
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / ZERO
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / QNAN
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # QNAN / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # QNAN / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_norm		- tbl_fsgldiv_op # DENORM / NORM
+	short		fsgldiv_inf_load	- tbl_fsgldiv_op # DENORM / ZERO
+	short		fsgldiv_zero_load	- tbl_fsgldiv_op # DENORM / INF
+	short		fsgldiv_res_qnan	- tbl_fsgldiv_op # DENORM / QNAN
+	short		fsgldiv_norm		- tbl_fsgldiv_op # DENORM / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # DENORM / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / NORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / ZERO
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / INF
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / QNAN
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / DENORM
+	short		fsgldiv_res_snan	- tbl_fsgldiv_op # SNAN / SNAN
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+	short		tbl_fsgldiv_op		- tbl_fsgldiv_op #
+
+fsgldiv_res_qnan:
+	bra.l		res_qnan
+fsgldiv_res_snan:
+	bra.l		res_snan
+fsgldiv_res_operr:
+	bra.l		res_operr
+fsgldiv_inf_load:
+	bra.l		fdiv_inf_load
+fsgldiv_zero_load:
+	bra.l		fdiv_zero_load
+fsgldiv_inf_dst:
+	bra.l		fdiv_inf_dst
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fadd(): emulates the fadd instruction				#
+#	fsadd(): emulates the fsadd instruction				#
+#	fdadd(): emulates the fdadd instruction				#
+#									#
+# XREF ****************************************************************	#
+#	addsub_scaler2() - scale the operands so they won't take exc	#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan() - set QNAN result					#
+#	res_snan() - set SNAN result					#
+#	res_operr() - set OPERR result					#
+#	scale_to_zero_src() - set src operand exponent equal to zero	#
+#	scale_to_zero_dst() - set dst operand exponent equal to zero	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Do addition after scaling exponents such that exception won't	#
+# occur. Then, check result exponent to see if exception would have	#
+# occurred. If so, return default result and maybe EXOP. Else, insert	#
+# the correct result exponent and return. Set FPSR bits as appropriate.	#
+#									#
+#########################################################################
+
+	global		fsadd
+fsadd:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fadd
+
+	global		fdadd
+fdadd:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fadd
+fadd:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fadd_not_norm		# optimize on non-norm input
+
+#
+# ADD: norms and denorms
+#
+fadd_norm:
+	bsr.l		addsub_scaler2		# scale exponents
+
+fadd_zero_entry:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fadd.x		FP_SCR0(%a6),%fp0	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch INEX2,N,Z
+
+	or.l		%d1,USER_FPSR(%a6)	# save exc and ccode bits
+
+	fbeq.w		fadd_zero_exit		# if result is zero, end now
+
+	mov.l		%d2,-(%sp)		# save d2
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+
+	mov.w		2+L_SCR3(%a6),%d1
+	lsr.b		&0x6,%d1
+
+	mov.w		(%sp),%d2		# fetch new sign, exp
+	andi.l		&0x7fff,%d2		# strip sign
+	sub.l		%d0,%d2			# subtract scale factor
+
+	cmp.l		%d2,(tbl_fadd_ovfl.b,%pc,%d1.w*4) # is it an overflow?
+	bge.b		fadd_ovfl		# yes
+
+	cmp.l		%d2,(tbl_fadd_unfl.b,%pc,%d1.w*4) # is it an underflow?
+	blt.w		fadd_unfl		# yes
+	beq.w		fadd_may_unfl		# maybe; go find out
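+#
+# (how the table test works: d1 holds the rounding precision from bits
+# 7:6 of L_SCR3 (0 = ext, 1 = sgl, 2 = dbl) and d2 the unscaled result
+# exponent. tbl_fadd_ovfl below holds the first overflowing exponent
+# per precision -- e.g. 0x407f for single, since single norms top out
+# at 0x407e -- and tbl_fadd_unfl the smallest legal norm exponent,
+# where equality is the may-underflow boundary.)
+#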
+
+fadd_normal:
+	mov.w		(%sp),%d1
+	andi.w		&0x8000,%d1		# keep sign
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x80		# return result in fp0
+
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fadd_zero_exit:
+#	fmov.s		&0x00000000,%fp0	# return zero in fp0
+	rts
+
+tbl_fadd_ovfl:
+	long		0x7fff			# ext ovfl
+	long		0x407f			# sgl ovfl
+	long		0x43ff			# dbl ovfl
+
+tbl_fadd_unfl:
+	long		0x0000			# ext unfl
+	long		0x3f81			# sgl unfl
+	long		0x3c01			# dbl unfl
+
+fadd_ovfl:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fadd_ovfl_ena		# yes
+
+	add.l		&0xc,%sp		# discard result from stack
+fadd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fadd_ovfl_ena:
+	mov.b		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fadd_ovfl_ena_sd	# no; prec = sgl or dbl
+
+fadd_ovfl_ena_cont:
+	mov.w		(%sp),%d1
+	andi.w		&0x8000,%d1		# keep sign
+	subi.l		&0x6000,%d2		# add extra bias
+	andi.w		&0x7fff,%d2
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x40		# return EXOP in fp1
+	bra.b		fadd_ovfl_dis
+
+fadd_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# keep rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fadd.x		FP_SCR0(%a6),%fp0	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	add.l		&0xc,%sp		# discard old result from stack
+	fmovm.x		&0x01,-(%sp)
+	bra.b		fadd_ovfl_ena_cont
+
+fadd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	add.l		&0xc,%sp		# discard result from stack
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fadd.x		FP_SCR0(%a6),%fp0	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save status
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX,N
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fadd_unfl_ena		# yes
+
+fadd_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' bit may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fadd_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fadd_unfl_ena_sd	# no; sgl or dbl
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fadd_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fadd.x		FP_SCR0(%a6),%fp1	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# save result to stack
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1		# clear top bit
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fadd_unfl_dis
+
+fadd_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# use only rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fadd_unfl_ena_cont
+
+#
+# result is equal to the smallest normalized number in the selected precision
+# if the precision is extended, this result could not have come from an
+# underflow that rounded up.
+#
+fadd_may_unfl:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	beq.w		fadd_normal		# yes; no underflow occurred
+
+	mov.l		0x4(%sp),%d1		# extract hi(man)
+	cmpi.l		%d1,&0x80000000		# is hi(man) = 0x80000000?
+	bne.w		fadd_normal		# no; no underflow occurred
+
+	tst.l		0x8(%sp)		# is lo(man) = 0x0?
+	bne.w		fadd_normal		# no; no underflow occurred
+
+	btst		&inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.w		fadd_normal		# no; no underflow occurred
+
+#
+# ok, so now the result has an exponent equal to the smallest normalized
+# exponent for the selected precision. also, the mantissa is equal to
+# 0x8000000000000000 and this mantissa is the result of rounding non-zero
+# g,r,s.
+# now, we must determine whether the pre-rounded result was an underflow
+# rounded "up" or a normalized number rounded "down".
+# so, we do this by re-executing the add using RZ as the rounding mode and
+# seeing if the new result is smaller or equal to the current result.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fadd.x		FP_SCR0(%a6),%fp1	# execute add
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# compare absolute values
+	fabs.x		%fp1
+	fcmp.x		%fp0,%fp1		# is first result > second?
+
+	fbgt.w		fadd_unfl		# yes; it's an underflow
+	bra.w		fadd_normal		# no; it's not an underflow
+
+##########################################################################
+
+#
+# Add: inputs are not both normalized; what are they?
+#
+fadd_not_norm:
+	mov.w		(tbl_fadd_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fadd_op.b,%pc,%d1.w*1)
+
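+# (the index is (DTAG<<3)|STAG, scaled by the indexed addressing modes
+#  above; from the rows below the tag encoding is NORM=0, ZERO=1, INF=2,
+#  QNAN=3, DENORM=4, SNAN=5, with entries 6 and 7 unused)
+#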
+	swbeg		&48
+tbl_fadd_op:
+	short		fadd_norm	- tbl_fadd_op # NORM + NORM
+	short		fadd_zero_src	- tbl_fadd_op # NORM + ZERO
+	short		fadd_inf_src	- tbl_fadd_op # NORM + INF
+	short		fadd_res_qnan	- tbl_fadd_op # NORM + QNAN
+	short		fadd_norm	- tbl_fadd_op # NORM + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # NORM + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_zero_dst	- tbl_fadd_op # ZERO + NORM
+	short		fadd_zero_2	- tbl_fadd_op # ZERO + ZERO
+	short		fadd_inf_src	- tbl_fadd_op # ZERO + INF
+	short		fadd_res_qnan	- tbl_fadd_op # ZERO + QNAN
+	short		fadd_zero_dst	- tbl_fadd_op # ZERO + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # ZERO + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_inf_dst	- tbl_fadd_op # INF + NORM
+	short		fadd_inf_dst	- tbl_fadd_op # INF + ZERO
+	short		fadd_inf_2	- tbl_fadd_op # INF + INF
+	short		fadd_res_qnan	- tbl_fadd_op # INF + QNAN
+	short		fadd_inf_dst	- tbl_fadd_op # INF + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # INF + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + NORM
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + ZERO
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + INF
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + QNAN
+	short		fadd_res_qnan	- tbl_fadd_op # QNAN + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # QNAN + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_norm	- tbl_fadd_op # DENORM + NORM
+	short		fadd_zero_src	- tbl_fadd_op # DENORM + ZERO
+	short		fadd_inf_src	- tbl_fadd_op # DENORM + INF
+	short		fadd_res_qnan	- tbl_fadd_op # DENORM + QNAN
+	short		fadd_norm	- tbl_fadd_op # DENORM + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # DENORM + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + NORM
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + ZERO
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + INF
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + QNAN
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + DENORM
+	short		fadd_res_snan	- tbl_fadd_op # SNAN + SNAN
+	short		tbl_fadd_op	- tbl_fadd_op #
+	short		tbl_fadd_op	- tbl_fadd_op #
+
+fadd_res_qnan:
+	bra.l		res_qnan
+fadd_res_snan:
+	bra.l		res_snan
+
+#
+# both operands are ZEROes
+#
+fadd_zero_2:
+	mov.b		SRC_EX(%a0),%d0		# are the signs opposite
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d0,%d1
+	bmi.w		fadd_zero_2_chk_rm	# weed out (-ZERO)+(+ZERO)
+
+# the signs are the same. so determine whether they are positive or negative
+# and return the appropriately signed zero.
+	tst.b		%d0			# are ZEROes positive or negative?
+	bmi.b		fadd_zero_rm		# negative
+	fmov.s		&0x00000000,%fp0	# return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# the ZEROes have opposite signs:
+# - therefore, we return +ZERO if the rounding modes are RN,RZ, or RP.
+# - -ZERO is returned in the case of RM.
+#
+fadd_zero_2_chk_rm:
+	mov.b		3+L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# extract rnd mode
+	cmpi.b		%d1,&rm_mode*0x10	# is rnd mode == RM?
+	beq.b		fadd_zero_rm		# yes
+	fmov.s		&0x00000000,%fp0	# return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+fadd_zero_rm:
+	fmov.s		&0x80000000,%fp0	# return -ZERO
+	mov.b		&neg_bmask+z_bmask,FPSR_CC(%a6) # set NEG/Z
+	rts
+
+#
+# one operand is a ZERO and the other is a DENORM or NORM. scale
+# the DENORM or NORM and jump to the regular fadd routine.
+#
+fadd_zero_dst:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# scale the operand
+	clr.w		FP_SCR1_EX(%a6)
+	clr.l		FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+	bra.w		fadd_zero_entry		# go execute fadd
+
+fadd_zero_src:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	bsr.l		scale_to_zero_dst	# scale the operand
+	clr.w		FP_SCR0_EX(%a6)
+	clr.l		FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+	bra.w		fadd_zero_entry		# go execute fadd
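+
+# (either way the zero operand is rebuilt as a true +0 and the other
+#  operand is scaled to a zero exponent, so the regular path performs
+#  the add and then applies the common round/ovfl/unfl checks uniformly)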
+
+#
+# both operands are INFs. an OPERR will result if the INFs have
+# different signs. else, an INF of the same sign is returned
+#
+fadd_inf_2:
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d1,%d0
+	bmi.l		res_operr		# weed out (-INF)+(+INF)
+
+# ok, so it's not an OPERR. but, we do have to remember to return the
+# src INF since that's where the 881/882 gets the j-bit from...
+
+#
+# operands are INF and one of {ZERO, INF, DENORM, NORM}
+#
+fadd_inf_src:
+	fmovm.x		SRC(%a0),&0x80		# return src INF
+	tst.b		SRC_EX(%a0)		# is INF positive?
+	bpl.b		fadd_inf_done		# yes; we're done
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+#
+# operands are INF and one of {ZERO, INF, DENORM, NORM}
+#
+fadd_inf_dst:
+	fmovm.x		DST(%a1),&0x80		# return dst INF
+	tst.b		DST_EX(%a1)		# is INF positive?
+	bpl.b		fadd_inf_done		# yes; we're done
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fadd_inf_done:
+	mov.b		&inf_bmask,FPSR_CC(%a6) # set INF
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsub(): emulates the fsub instruction				#
+#	fssub(): emulates the fssub instruction				#
+#	fdsub(): emulates the fdsub instruction				#
+#									#
+# XREF ****************************************************************	#
+#	addsub_scaler2() - scale the operands so they won't take exc	#
+#	ovf_res() - return default overflow result			#
+#	unf_res() - return default underflow result			#
+#	res_qnan() - set QNAN result					#
+#	res_snan() - set SNAN result					#
+#	res_operr() - set OPERR result					#
+#	scale_to_zero_src() - set src operand exponent equal to zero	#
+#	scale_to_zero_dst() - set dst operand exponent equal to zero	#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	a1 = pointer to extended precision destination operand		#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms into extended, single, and double precision.			#
+#	Do subtraction after scaling exponents such that exception won't#
+# occur. Then, check result exponent to see if exception would have	#
+# occurred. If so, return default result and maybe EXOP. Else, insert	#
+# the correct result exponent and return. Set FPSR bits as appropriate.	#
+#									#
+#########################################################################
+
+	global		fssub
+fssub:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl prec
+	bra.b		fsub
+
+	global		fdsub
+fdsub:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl prec
+
+	global		fsub
+fsub:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+
+	clr.w		%d1
+	mov.b		DTAG(%a6),%d1
+	lsl.b		&0x3,%d1
+	or.b		STAG(%a6),%d1		# combine src tags
+
+	bne.w		fsub_not_norm		# optimize on non-norm input
+
+#
+# SUB: norms and denorms
+#
+fsub_norm:
+	bsr.l		addsub_scaler2		# scale exponents
+
+fsub_zero_entry:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsub.x		FP_SCR0(%a6),%fp0	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# fetch INEX2, N, Z
+
+	or.l		%d1,USER_FPSR(%a6)	# save exc and ccode bits
+
+	fbeq.w		fsub_zero_exit		# if result zero, end now
+
+	mov.l		%d2,-(%sp)		# save d2
+
+	fmovm.x		&0x01,-(%sp)		# save result to stack
+
+	mov.w		2+L_SCR3(%a6),%d1
+	lsr.b		&0x6,%d1
+
+	mov.w		(%sp),%d2		# fetch new exponent
+	andi.l		&0x7fff,%d2		# strip sign
+	sub.l		%d0,%d2			# add scale factor
+
+	cmp.l		%d2,(tbl_fsub_ovfl.b,%pc,%d1.w*4) # is it an overflow?
+	bge.b		fsub_ovfl		# yes
+
+	cmp.l		%d2,(tbl_fsub_unfl.b,%pc,%d1.w*4) # is it an underflow?
+	blt.w		fsub_unfl		# yes
+	beq.w		fsub_may_unfl		# maybe; go find out
+
+fsub_normal:
+	mov.w		(%sp),%d1
+	andi.w		&0x8000,%d1		# keep sign
+	or.w		%d2,%d1			# concat sign,new exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x80		# return result in fp0
+
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fsub_zero_exit:
+#	fmov.s		&0x00000000,%fp0	# return zero in fp0
+	rts
+
+tbl_fsub_ovfl:
+	long		0x7fff			# ext ovfl
+	long		0x407f			# sgl ovfl
+	long		0x43ff			# dbl ovfl
+
+tbl_fsub_unfl:
+	long	        0x0000			# ext unfl
+	long		0x3f81			# sgl unfl
+	long		0x3c01			# dbl unfl
+
+fsub_ovfl:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsub_ovfl_ena		# yes
+
+	add.l		&0xc,%sp
+fsub_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass prec:rnd
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fsub_ovfl_ena:
+	mov.b		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fsub_ovfl_ena_sd	# no; prec = sgl or dbl
+
+fsub_ovfl_ena_cont:
+	mov.w		(%sp),%d1		# fetch {sgn,exp}
+	andi.w		&0x8000,%d1		# keep sign
+	subi.l		&0x6000,%d2		# subtract new bias
+	andi.w		&0x7fff,%d2		# clear top bit
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,(%sp)		# insert new exponent
+
+	fmovm.x		(%sp)+,&0x40		# return EXOP in fp1
+	bra.b		fsub_ovfl_dis
+
+fsub_ovfl_ena_sd:
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# clear rnd prec
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	fsub.x		FP_SCR0(%a6),%fp0	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	add.l		&0xc,%sp
+	fmovm.x		&0x01,-(%sp)
+	bra.b		fsub_ovfl_ena_cont
+
+fsub_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	add.l		&0xc,%sp
+
+	fmovm.x		FP_SCR1(%a6),&0x80	# load dst op
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsub.x		FP_SCR0(%a6),%fp0	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save status
+
+	or.l		%d1,USER_FPSR(%a6)
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsub_unfl_ena		# yes
+
+fsub_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# 'Z' may have been set
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	mov.l		(%sp)+,%d2		# restore d2
+	rts
+
+fsub_unfl_ena:
+	fmovm.x		FP_SCR1(%a6),&0x40
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	bne.b		fsub_unfl_ena_sd	# no
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+fsub_unfl_ena_cont:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsub.x		FP_SCR0(%a6),%fp1	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fmovm.x		&0x40,FP_SCR0(%a6)	# store result to stack
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1		# clear top bit
+	or.w		%d2,%d1			# concat sgn,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	bra.w		fsub_unfl_dis
+
+fsub_unfl_ena_sd:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# clear rnd prec
+	fmov.l		%d1,%fpcr		# set FPCR
+
+	bra.b		fsub_unfl_ena_cont
+
+#
+# result is equal to the smallest normalized number in the selected precision
+# if the precision is extended, this result could not have come from an
+# underflow that rounded up.
+#
+fsub_may_unfl:
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# is precision extended?
+	beq.w		fsub_normal		# yes; no underflow occurred
+
+	mov.l		0x4(%sp),%d1
+	cmpi.l		%d1,&0x80000000		# is hi(man) = 0x80000000?
+	bne.w		fsub_normal		# no; no underflow occurred
+
+	tst.l		0x8(%sp)		# is lo(man) = 0x0?
+	bne.w		fsub_normal		# no; no underflow occurred
+
+	btst		&inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+	beq.w		fsub_normal		# no; no underflow occurred
+
+#
+# ok, so now the result has an exponent equal to the smallest normalized
+# exponent for the selected precision. also, the mantissa is equal to
+# 0x8000000000000000 and this mantissa is the result of rounding non-zero
+# g,r,s.
+# now, we must determine whether the pre-rounded result was an underflow
+# rounded "up" or a normalized number rounded "down".
+# so, we do this by re-executing the subtract using RZ as the rounding mode and
+# seeing if the new result is smaller or equal to the current result.
+#
+	fmovm.x		FP_SCR1(%a6),&0x40	# load dst op into fp1
+
+	mov.l		L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1		# keep rnd prec
+	ori.b		&rz_mode*0x10,%d1	# insert rnd mode
+	fmov.l		%d1,%fpcr		# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsub.x		FP_SCR0(%a6),%fp1	# execute subtract
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	fabs.x		%fp0			# compare absolute values
+	fabs.x		%fp1
+	fcmp.x		%fp0,%fp1		# is first result > second?
+
+	fbgt.w		fsub_unfl		# yes; it's an underflow
+	bra.w		fsub_normal		# no; it's not an underflow
+
+##########################################################################
+
+#
+# Sub: inputs are not both normalized; what are they?
+#
+fsub_not_norm:
+	mov.w		(tbl_fsub_op.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_fsub_op.b,%pc,%d1.w*1)
+
+	swbeg		&48
+tbl_fsub_op:
+	short		fsub_norm	- tbl_fsub_op # NORM - NORM
+	short		fsub_zero_src	- tbl_fsub_op # NORM - ZERO
+	short		fsub_inf_src	- tbl_fsub_op # NORM - INF
+	short		fsub_res_qnan	- tbl_fsub_op # NORM - QNAN
+	short		fsub_norm	- tbl_fsub_op # NORM - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # NORM - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_zero_dst	- tbl_fsub_op # ZERO - NORM
+	short		fsub_zero_2	- tbl_fsub_op # ZERO - ZERO
+	short		fsub_inf_src	- tbl_fsub_op # ZERO - INF
+	short		fsub_res_qnan	- tbl_fsub_op # ZERO - QNAN
+	short		fsub_zero_dst	- tbl_fsub_op # ZERO - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # ZERO - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_inf_dst	- tbl_fsub_op # INF - NORM
+	short		fsub_inf_dst	- tbl_fsub_op # INF - ZERO
+	short		fsub_inf_2	- tbl_fsub_op # INF - INF
+	short		fsub_res_qnan	- tbl_fsub_op # INF - QNAN
+	short		fsub_inf_dst	- tbl_fsub_op # INF - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # INF - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - NORM
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - ZERO
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - INF
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - QNAN
+	short		fsub_res_qnan	- tbl_fsub_op # QNAN - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # QNAN - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_norm	- tbl_fsub_op # DENORM - NORM
+	short		fsub_zero_src	- tbl_fsub_op # DENORM - ZERO
+	short		fsub_inf_src	- tbl_fsub_op # DENORM - INF
+	short		fsub_res_qnan	- tbl_fsub_op # DENORM - QNAN
+	short		fsub_norm	- tbl_fsub_op # DENORM - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # DENORM - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - NORM
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - ZERO
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - INF
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - QNAN
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - DENORM
+	short		fsub_res_snan	- tbl_fsub_op # SNAN - SNAN
+	short		tbl_fsub_op	- tbl_fsub_op #
+	short		tbl_fsub_op	- tbl_fsub_op #
+
+fsub_res_qnan:
+	bra.l		res_qnan
+fsub_res_snan:
+	bra.l		res_snan
+
+#
+# both operands are ZEROes
+#
+fsub_zero_2:
+	mov.b		SRC_EX(%a0),%d0
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d1,%d0
+	bpl.b		fsub_zero_2_chk_rm	# weed out (+ZERO)-(+ZERO)
+
+# the signs are opposite, so return a ZERO w/ the sign of the dst ZERO
+	tst.b		%d1			# is dst negative?
+	bmi.b		fsub_zero_2_rm		# yes
+	fmov.s		&0x00000000,%fp0	# no; return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+#
+# the ZEROes have the same signs:
+# - therefore, we return +ZERO if the rounding mode is RN,RZ, or RP
+# - -ZERO is returned in the case of RM.
+#
+fsub_zero_2_chk_rm:
+	mov.b		3+L_SCR3(%a6),%d1
+	andi.b		&0x30,%d1		# extract rnd mode
+	cmpi.b		%d1,&rm_mode*0x10	# is rnd mode = RM?
+	beq.b		fsub_zero_2_rm		# yes
+	fmov.s		&0x00000000,%fp0	# no; return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set Z
+	rts
+
+fsub_zero_2_rm:
+	fmov.s		&0x80000000,%fp0	# return -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set Z/NEG
+	rts
+
+#
+# one operand is a ZERO and the other is a DENORM or a NORM.
+# scale the DENORM or NORM and jump to the regular fsub routine.
+#
+fsub_zero_dst:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+	bsr.l		scale_to_zero_src	# scale the operand
+	clr.w		FP_SCR1_EX(%a6)
+	clr.l		FP_SCR1_HI(%a6)
+	clr.l		FP_SCR1_LO(%a6)
+	bra.w		fsub_zero_entry		# go execute fsub
+
+fsub_zero_src:
+	mov.w		DST_EX(%a1),FP_SCR1_EX(%a6)
+	mov.l		DST_HI(%a1),FP_SCR1_HI(%a6)
+	mov.l		DST_LO(%a1),FP_SCR1_LO(%a6)
+	bsr.l		scale_to_zero_dst	# scale the operand
+	clr.w		FP_SCR0_EX(%a6)
+	clr.l		FP_SCR0_HI(%a6)
+	clr.l		FP_SCR0_LO(%a6)
+	bra.w		fsub_zero_entry		# go execute fsub
+
+#
+# both operands are INFs. an OPERR will result if the INFs have the
+# same signs. else, an INF w/ the sign of the dst INF is returned.
+#
+fsub_inf_2:
+	mov.b		SRC_EX(%a0),%d0		# exclusive or the signs
+	mov.b		DST_EX(%a1),%d1
+	eor.b		%d1,%d0
+	bpl.l		res_operr		# weed out (+INF)-(+INF)
+
+# ok, so it's not an OPERR. but we do have to remember to return
+# the src INF since that's where the 881/882 gets the j-bit.
+
+fsub_inf_src:
+	fmovm.x		SRC(%a0),&0x80		# return src INF
+	fneg.x		%fp0			# invert sign
+	fbge.w		fsub_inf_done		# sign is now positive
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fsub_inf_dst:
+	fmovm.x		DST(%a1),&0x80		# return dst INF
+	tst.b		DST_EX(%a1)		# is INF negative?
+	bpl.b		fsub_inf_done		# no
+	mov.b		&neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+	rts
+
+fsub_inf_done:
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set INF
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fsqrt(): emulates the fsqrt instruction				#
+#	fssqrt(): emulates the fssqrt instruction			#
+#	fdsqrt(): emulates the fdsqrt instruction			#
+#									#
+# XREF ****************************************************************	#
+#	scale_sqrt() - scale the source operand				#
+#	unf_res() - return default underflow result			#
+#	ovf_res() - return default overflow result			#
+#	res_qnan_1op() - return QNAN result				#
+#	res_snan_1op() - return SNAN result				#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to extended precision source operand		#
+#	d0 = rnd prec,mode						#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = result							#
+#	fp1 = EXOP (if exception occurred)				#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Handle NANs, infinities, and zeroes as special cases. Divide	#
+# norms/denorms into ext/sgl/dbl precision.				#
+#	For norms/denorms, scale the exponents such that a sqrt		#
+# instruction won't cause an exception. Use the regular fsqrt to	#
+# compute a result. Check if the regular operands would have taken	#
+# an exception. If so, return the default overflow/underflow result	#
+# and return the EXOP if exceptions are enabled. Else, scale the	#
+# result operand to the proper exponent.				#
+#									#
+#########################################################################
+
+	global		fssqrt
+fssqrt:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&s_mode*0x10,%d0	# insert sgl precision
+	bra.b		fsqrt
+
+	global		fdsqrt
+fdsqrt:
+	andi.b		&0x30,%d0		# clear rnd prec
+	ori.b		&d_mode*0x10,%d0	# insert dbl precision
+
+	global		fsqrt
+fsqrt:
+	mov.l		%d0,L_SCR3(%a6)		# store rnd info
+	clr.w		%d1
+	mov.b		STAG(%a6),%d1
+	bne.w		fsqrt_not_norm		# optimize on non-norm input
+
+#
+# SQUARE ROOT: norms and denorms ONLY!
+#
+fsqrt_norm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.l		res_operr		# yes
+
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fsqrt_not_ext		# no; go handle sgl or dbl
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsqrt.x		(%a0),%fp0		# execute square root
+
+	fmov.l		%fpsr,%d1
+	or.l		%d1,USER_FPSR(%a6)	# set N,INEX
+
+	rts
+
+fsqrt_denorm:
+	tst.b		SRC_EX(%a0)		# is operand negative?
+	bmi.l		res_operr		# yes
+
+	andi.b		&0xc0,%d0		# is precision extended?
+	bne.b		fsqrt_not_ext		# no; go handle sgl or dbl
+
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_sqrt		# calculate scale factor
+
+	bra.w		fsqrt_sd_normal
+
+#
+# operand is either single or double
+#
+fsqrt_not_ext:
+	cmpi.b		%d0,&s_mode*0x10	# separate sgl/dbl prec
+	bne.w		fsqrt_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fsqrt_sgl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_sqrt		# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3f81	# will move in underflow?
+	beq.w		fsqrt_sd_may_unfl
+	bgt.w		fsqrt_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x407f	# will move in overflow?
+	beq.w		fsqrt_sd_may_ovfl	# maybe; go check
+	blt.w		fsqrt_sd_ovfl		# yes; go handle overflow
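+
+# (scale_sqrt leaves the operand exponent at 0x3ffe or 0x3fff and returns
+#  the scale factor in d0; the final exponent is the interim one less d0,
+#  so d0 > 0x3fff-0x3f81 = 126 forces a sgl underflow, d0 < 0x3fff-0x407f
+#  = -128 an overflow, and equality is the borderline "may" case)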
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fsqrt_sd_normal:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# execute square root
+
+	fmov.l		%fpsr,%d1		# save FPSR
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsqrt_sd_normal_exit:
+	mov.l		%d2,-(%sp)		# save d2
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+	mov.w		FP_SCR0_EX(%a6),%d1	# load sgn,exp
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	sub.l		%d0,%d1			# add scale factor
+	andi.w		&0x8000,%d2		# keep old sign
+	or.w		%d1,%d2			# concat old sign,new exp
+	mov.w		%d2,FP_SCR0_EX(%a6)	# insert new exponent
+	mov.l		(%sp)+,%d2		# restore d2
+	fmovm.x		FP_SCR0(%a6),&0x80	# return result in fp0
+	rts
+
+#
+# operand is to be rounded to double precision
+#
+fsqrt_dbl:
+	mov.w		SRC_EX(%a0),FP_SCR0_EX(%a6)
+	mov.l		SRC_HI(%a0),FP_SCR0_HI(%a6)
+	mov.l		SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+	bsr.l		scale_sqrt		# calculate scale factor
+
+	cmpi.l		%d0,&0x3fff-0x3c01	# will move in underflow?
+	beq.w		fsqrt_sd_may_unfl
+	bgt.b		fsqrt_sd_unfl		# yes; go handle underflow
+	cmpi.l		%d0,&0x3fff-0x43ff	# will move in overflow?
+	beq.w		fsqrt_sd_may_ovfl	# maybe; go check
+	blt.w		fsqrt_sd_ovfl		# yes; go handle overflow
+	bra.w		fsqrt_sd_normal		# no; go handle normalized op
+
+# we're on the line here and the distinguishing characteristic is whether
+# the exponent is 3fff or 3ffe. if it's 3ffe, then it's a safe number;
+# otherwise, fall through to underflow.
+fsqrt_sd_may_unfl:
+	btst		&0x0,1+FP_SCR0_EX(%a6)	# is exponent 0x3fff?
+	bne.w		fsqrt_sd_normal		# yes, so no underflow
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fsqrt_sd_unfl:
+	bset		&unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+	fmov.l		&rz_mode*0x10,%fpcr	# set FPCR
+	fmov.l		&0x0,%fpsr		# clear FPSR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# execute square root
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x0b,%d1		# is UNFL or INEX enabled?
+	bne.b		fsqrt_sd_unfl_ena	# yes
+
+fsqrt_sd_unfl_dis:
+	fmovm.x		&0x80,FP_SCR0(%a6)	# store out result
+
+	lea		FP_SCR0(%a6),%a0	# pass: result addr
+	mov.l		L_SCR3(%a6),%d1		# pass: rnd prec,mode
+	bsr.l		unf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set possible 'Z' ccode
+	fmovm.x		FP_SCR0(%a6),&0x80	# return default result in fp0
+	rts
+
+#
+# operand will underflow AND underflow is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fsqrt_sd_unfl_ena:
+	mov.l		FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+	mov.l		FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+	mov.w		FP_SCR0_EX(%a6),%d1	# load current exponent
+
+	mov.l		%d2,-(%sp)		# save d2
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# subtract scale factor
+	addi.l		&0x6000,%d1		# add new bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat new sign,new exp
+	mov.w		%d1,FP_SCR1_EX(%a6)	# insert new exp
+	fmovm.x		FP_SCR1(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fsqrt_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fsqrt_sd_ovfl:
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# perform square root
+
+	fmov.l		&0x0,%fpcr		# clear FPCR
+	fmov.l		%fpsr,%d1		# save FPSR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+fsqrt_sd_ovfl_tst:
+	or.l		&ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+	mov.b		FPCR_ENABLE(%a6),%d1
+	andi.b		&0x13,%d1		# is OVFL or INEX enabled?
+	bne.b		fsqrt_sd_ovfl_ena	# yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fsqrt_sd_ovfl_dis:
+	btst		&neg_bit,FPSR_CC(%a6)	# is result negative?
+	sne		%d1			# set sign param accordingly
+	mov.l		L_SCR3(%a6),%d0		# pass: prec,mode
+	bsr.l		ovf_res			# calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# set INF,N if applicable
+	fmovm.x		(%a0),&0x80		# return default result in fp0
+	rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fsqrt_sd_ovfl_ena:
+	mov.l		%d2,-(%sp)		# save d2
+	mov.w		FP_SCR0_EX(%a6),%d1	# fetch {sgn,exp}
+	mov.l		%d1,%d2			# make a copy
+	andi.l		&0x7fff,%d1		# strip sign
+	andi.w		&0x8000,%d2		# keep old sign
+	sub.l		%d0,%d1			# add scale factor
+	subi.l		&0x6000,%d1		# subtract bias
+	andi.w		&0x7fff,%d1
+	or.w		%d2,%d1			# concat sign,exp
+	mov.w		%d1,FP_SCR0_EX(%a6)	# insert new exponent
+	fmovm.x		FP_SCR0(%a6),&0x40	# return EXOP in fp1
+	mov.l		(%sp)+,%d2		# restore d2
+	bra.b		fsqrt_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fsqrt_sd_may_ovfl:
+	btst		&0x0,1+FP_SCR0_EX(%a6)	# is exponent 0x3fff?
+	bne.w		fsqrt_sd_ovfl		# yes, so overflow
+
+	fmov.l		&0x0,%fpsr		# clear FPSR
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# execute square root
+
+	fmov.l		%fpsr,%d1		# save status
+	fmov.l		&0x0,%fpcr		# clear FPCR
+
+	or.l		%d1,USER_FPSR(%a6)	# save INEX2,N
+
+	fmov.x		%fp0,%fp1		# make a copy of result
+	fcmp.b		%fp1,&0x1		# is |result| >= 1.b?
+	fbge.w		fsqrt_sd_ovfl_tst	# yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+	bra.w		fsqrt_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fsqrt_not_norm:
+	cmpi.b		%d1,&DENORM		# weed out DENORM
+	beq.w		fsqrt_denorm
+	cmpi.b		%d1,&ZERO		# weed out ZERO
+	beq.b		fsqrt_zero
+	cmpi.b		%d1,&INF		# weed out INF
+	beq.b		fsqrt_inf
+	cmpi.b		%d1,&SNAN		# weed out SNAN
+	beq.l		res_snan_1op
+	bra.l		res_qnan_1op
+
+#
+#	fsqrt(+0) = +0
+#	fsqrt(-0) = -0
+#	fsqrt(+INF) = +INF
+#	fsqrt(-INF) = OPERR
+#
+fsqrt_zero:
+	tst.b		SRC_EX(%a0)		# is ZERO positive or negative?
+	bmi.b		fsqrt_zero_m		# negative
+fsqrt_zero_p:
+	fmov.s		&0x00000000,%fp0	# return +ZERO
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+	rts
+fsqrt_zero_m:
+	fmov.s		&0x80000000,%fp0	# return -ZERO
+	mov.b		&z_bmask+neg_bmask,FPSR_CC(%a6)	# set 'Z','N' ccode bits
+	rts
+
+fsqrt_inf:
+	tst.b		SRC_EX(%a0)		# is INF positive or negative?
+	bmi.l		res_operr		# negative
+fsqrt_inf_p:
+	fmovm.x		SRC(%a0),&0x80		# return +INF in fp0
+	mov.b		&inf_bmask,FPSR_CC(%a6)	# set 'I' ccode bit
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	fetch_dreg(): fetch register according to index in d1		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d1 = index of register to fetch from				#
+#									#
+# OUTPUT **************************************************************	#
+#	d0 = value of register fetched					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1 which can range from zero	#
+# to fifteen, load the corresponding register file value (where		#
+# address register indexes start at 8). D0/D1/A0/A1/A6/A7 are on the	#
+# stack. The rest should still be in their original places.		#
+#									#
+#########################################################################
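+
+# (index 0-7 selects d0-d7 and 8-15 selects a0-a7; d0/d1 and a0/a1 come
+#  from the EXC_DREGS images on the stack frame, a6 from the saved frame
+#  pointer at (%a6), and a7 from EXC_A7)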
+
+# this routine leaves d1 intact for subsequent store_dreg calls.
+	global		fetch_dreg
+fetch_dreg:
+	mov.w		(tbl_fdreg.b,%pc,%d1.w*2),%d0
+	jmp		(tbl_fdreg.b,%pc,%d0.w*1)
+
+tbl_fdreg:
+	short		fdreg0 - tbl_fdreg
+	short		fdreg1 - tbl_fdreg
+	short		fdreg2 - tbl_fdreg
+	short		fdreg3 - tbl_fdreg
+	short		fdreg4 - tbl_fdreg
+	short		fdreg5 - tbl_fdreg
+	short		fdreg6 - tbl_fdreg
+	short		fdreg7 - tbl_fdreg
+	short		fdreg8 - tbl_fdreg
+	short		fdreg9 - tbl_fdreg
+	short		fdrega - tbl_fdreg
+	short		fdregb - tbl_fdreg
+	short		fdregc - tbl_fdreg
+	short		fdregd - tbl_fdreg
+	short		fdrege - tbl_fdreg
+	short		fdregf - tbl_fdreg
+
+fdreg0:
+	mov.l		EXC_DREGS+0x0(%a6),%d0
+	rts
+fdreg1:
+	mov.l		EXC_DREGS+0x4(%a6),%d0
+	rts
+fdreg2:
+	mov.l		%d2,%d0
+	rts
+fdreg3:
+	mov.l		%d3,%d0
+	rts
+fdreg4:
+	mov.l		%d4,%d0
+	rts
+fdreg5:
+	mov.l		%d5,%d0
+	rts
+fdreg6:
+	mov.l		%d6,%d0
+	rts
+fdreg7:
+	mov.l		%d7,%d0
+	rts
+fdreg8:
+	mov.l		EXC_DREGS+0x8(%a6),%d0
+	rts
+fdreg9:
+	mov.l		EXC_DREGS+0xc(%a6),%d0
+	rts
+fdrega:
+	mov.l		%a2,%d0
+	rts
+fdregb:
+	mov.l		%a3,%d0
+	rts
+fdregc:
+	mov.l		%a4,%d0
+	rts
+fdregd:
+	mov.l		%a5,%d0
+	rts
+fdrege:
+	mov.l		(%a6),%d0
+	rts
+fdregf:
+	mov.l		EXC_A7(%a6),%d0
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_dreg_l(): store longword to data register specified by d1	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = longword value to store					#
+#	d1 = index of data register to store to				#
+#									#
+# OUTPUT **************************************************************	#
+#	(data register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1, store the longword value	#
+# in d0 to the corresponding data register. D0/D1 are on the stack	#
+# while the rest are in their initial places.				#
+#									#
+#########################################################################
+
+	global		store_dreg_l
+store_dreg_l:
+	mov.w		(tbl_sdregl.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_sdregl.b,%pc,%d1.w*1)
+
+tbl_sdregl:
+	short		sdregl0 - tbl_sdregl
+	short		sdregl1 - tbl_sdregl
+	short		sdregl2 - tbl_sdregl
+	short		sdregl3 - tbl_sdregl
+	short		sdregl4 - tbl_sdregl
+	short		sdregl5 - tbl_sdregl
+	short		sdregl6 - tbl_sdregl
+	short		sdregl7 - tbl_sdregl
+
+sdregl0:
+	mov.l		%d0,EXC_DREGS+0x0(%a6)
+	rts
+sdregl1:
+	mov.l		%d0,EXC_DREGS+0x4(%a6)
+	rts
+sdregl2:
+	mov.l		%d0,%d2
+	rts
+sdregl3:
+	mov.l		%d0,%d3
+	rts
+sdregl4:
+	mov.l		%d0,%d4
+	rts
+sdregl5:
+	mov.l		%d0,%d5
+	rts
+sdregl6:
+	mov.l		%d0,%d6
+	rts
+sdregl7:
+	mov.l		%d0,%d7
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_dreg_w(): store word to data register specified by d1	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = word value to store					#
+#	d1 = index of data register to store to				#
+#									#
+# OUTPUT **************************************************************	#
+#	(data register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1, store the word value	#
+# in d0 to the corresponding data register. D0/D1 are on the stack	#
+# while the rest are in their initial places.				#
+#									#
+#########################################################################
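+
+# (a word store must leave the register's upper half intact; for the d0/d1
+#  images on the stack that means writing at offset 2 into the big-endian
+#  longword, just as store_dreg_b below writes its byte at offset 3)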
+
+	global		store_dreg_w
+store_dreg_w:
+	mov.w		(tbl_sdregw.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_sdregw.b,%pc,%d1.w*1)
+
+tbl_sdregw:
+	short		sdregw0 - tbl_sdregw
+	short		sdregw1 - tbl_sdregw
+	short		sdregw2 - tbl_sdregw
+	short		sdregw3 - tbl_sdregw
+	short		sdregw4 - tbl_sdregw
+	short		sdregw5 - tbl_sdregw
+	short		sdregw6 - tbl_sdregw
+	short		sdregw7 - tbl_sdregw
+
+sdregw0:
+	mov.w		%d0,2+EXC_DREGS+0x0(%a6)
+	rts
+sdregw1:
+	mov.w		%d0,2+EXC_DREGS+0x4(%a6)
+	rts
+sdregw2:
+	mov.w		%d0,%d2
+	rts
+sdregw3:
+	mov.w		%d0,%d3
+	rts
+sdregw4:
+	mov.w		%d0,%d4
+	rts
+sdregw5:
+	mov.w		%d0,%d5
+	rts
+sdregw6:
+	mov.w		%d0,%d6
+	rts
+sdregw7:
+	mov.w		%d0,%d7
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_dreg_b(): store byte to data register specified by d1	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = byte value to store					#
+#	d1 = index of data register to store to				#
+#									#
+# OUTPUT **************************************************************	#
+#	(data register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	According to the index value in d1, store the byte value	#
+# in d0 to the corresponding data register. D0/D1 are on the stack	#
+# while the rest are in their initial places.				#
+#									#
+#########################################################################
+
+	global		store_dreg_b
+store_dreg_b:
+	mov.w		(tbl_sdregb.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_sdregb.b,%pc,%d1.w*1)
+
+tbl_sdregb:
+	short		sdregb0 - tbl_sdregb
+	short		sdregb1 - tbl_sdregb
+	short		sdregb2 - tbl_sdregb
+	short		sdregb3 - tbl_sdregb
+	short		sdregb4 - tbl_sdregb
+	short		sdregb5 - tbl_sdregb
+	short		sdregb6 - tbl_sdregb
+	short		sdregb7 - tbl_sdregb
+
+sdregb0:
+	mov.b		%d0,3+EXC_DREGS+0x0(%a6)
+	rts
+sdregb1:
+	mov.b		%d0,3+EXC_DREGS+0x4(%a6)
+	rts
+sdregb2:
+	mov.b		%d0,%d2
+	rts
+sdregb3:
+	mov.b		%d0,%d3
+	rts
+sdregb4:
+	mov.b		%d0,%d4
+	rts
+sdregb5:
+	mov.b		%d0,%d5
+	rts
+sdregb6:
+	mov.b		%d0,%d6
+	rts
+sdregb7:
+	mov.b		%d0,%d7
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	inc_areg(): increment an address register by the value in d0	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = amount to increment by					#
+#	d1 = index of address register to increment			#
+#									#
+# OUTPUT **************************************************************	#
+#	(address register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Typically used for an instruction w/ a post-increment <ea>,	#
+# this routine adds the increment value in d0 to the address register	#
+# specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside	#
+# in their original places.						#
+#	For a7, if the increment amount is one, then we have to		#
+# increment by two. For any a7 update, set the mia7_flag so that if	#
+# an access error exception occurs later in emulation, this address	#
+# register update can be undone.					#
+#									#
+#########################################################################
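+
+# (the byte special case exists because a byte-sized (a7)+ still bumps the
+#  stack pointer by two so that a7 stays word aligned)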
+
+	global		inc_areg
+inc_areg:
+	mov.w		(tbl_iareg.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_iareg.b,%pc,%d1.w*1)
+
+tbl_iareg:
+	short		iareg0 - tbl_iareg
+	short		iareg1 - tbl_iareg
+	short		iareg2 - tbl_iareg
+	short		iareg3 - tbl_iareg
+	short		iareg4 - tbl_iareg
+	short		iareg5 - tbl_iareg
+	short		iareg6 - tbl_iareg
+	short		iareg7 - tbl_iareg
+
+iareg0:	add.l		%d0,EXC_DREGS+0x8(%a6)
+	rts
+iareg1:	add.l		%d0,EXC_DREGS+0xc(%a6)
+	rts
+iareg2:	add.l		%d0,%a2
+	rts
+iareg3:	add.l		%d0,%a3
+	rts
+iareg4:	add.l		%d0,%a4
+	rts
+iareg5:	add.l		%d0,%a5
+	rts
+iareg6:	add.l		%d0,(%a6)
+	rts
+iareg7:	mov.b		&mia7_flg,SPCOND_FLG(%a6)
+	cmpi.b		%d0,&0x1
+	beq.b		iareg7b
+	add.l		%d0,EXC_A7(%a6)
+	rts
+iareg7b:
+	addq.l		&0x2,EXC_A7(%a6)
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	dec_areg(): decrement an address register by the value in d0	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = amount to decrement by					#
+#	d1 = index of address register to decrement			#
+#									#
+# OUTPUT **************************************************************	#
+#	(address register is updated)					#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Typically used for an instruction w/ a pre-decrement <ea>,	#
+# this routine subtracts the decrement value in d0 from the address	#
+# register specified by d1. A0/A1/A6/A7 reside on the stack. The rest	#
+# in their original places.						#
+#	For a7, if the decrement amount is one, then we have to		#
+# decrement by two. For any a7 update, set the mda7_flag so that if	#
+# an access error exception occurs later in emulation, this address	#
+# register update can be undone.					#
+#									#
+#########################################################################
+
+	global		dec_areg
+dec_areg:
+	mov.w		(tbl_dareg.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_dareg.b,%pc,%d1.w*1)
+
+tbl_dareg:
+	short		dareg0 - tbl_dareg
+	short		dareg1 - tbl_dareg
+	short		dareg2 - tbl_dareg
+	short		dareg3 - tbl_dareg
+	short		dareg4 - tbl_dareg
+	short		dareg5 - tbl_dareg
+	short		dareg6 - tbl_dareg
+	short		dareg7 - tbl_dareg
+
+dareg0:	sub.l		%d0,EXC_DREGS+0x8(%a6)
+	rts
+dareg1:	sub.l		%d0,EXC_DREGS+0xc(%a6)
+	rts
+dareg2:	sub.l		%d0,%a2
+	rts
+dareg3:	sub.l		%d0,%a3
+	rts
+dareg4:	sub.l		%d0,%a4
+	rts
+dareg5:	sub.l		%d0,%a5
+	rts
+dareg6:	sub.l		%d0,(%a6)
+	rts
+dareg7:	mov.b		&mda7_flg,SPCOND_FLG(%a6)
+	cmpi.b		%d0,&0x1
+	beq.b		dareg7b
+	sub.l		%d0,EXC_A7(%a6)
+	rts
+dareg7b:
+	subq.l		&0x2,EXC_A7(%a6)
+	rts
+
+##############################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	load_fpn1(): load FP register value into FP_SRC(a6).		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = index of FP register to load				#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SRC(a6) = value loaded from FP register file			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Using the index in d0, load FP_SRC(a6) with a number from the	#
+# FP register file.							#
+#									#
+#########################################################################
+
+	global		load_fpn1
+load_fpn1:
+	mov.w		(tbl_load_fpn1.b,%pc,%d0.w*2), %d0
+	jmp		(tbl_load_fpn1.b,%pc,%d0.w*1)
+
+tbl_load_fpn1:
+	short		load_fpn1_0 - tbl_load_fpn1
+	short		load_fpn1_1 - tbl_load_fpn1
+	short		load_fpn1_2 - tbl_load_fpn1
+	short		load_fpn1_3 - tbl_load_fpn1
+	short		load_fpn1_4 - tbl_load_fpn1
+	short		load_fpn1_5 - tbl_load_fpn1
+	short		load_fpn1_6 - tbl_load_fpn1
+	short		load_fpn1_7 - tbl_load_fpn1
+
+load_fpn1_0:
+	mov.l		0+EXC_FP0(%a6), 0+FP_SRC(%a6)
+	mov.l		4+EXC_FP0(%a6), 4+FP_SRC(%a6)
+	mov.l		8+EXC_FP0(%a6), 8+FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_1:
+	mov.l		0+EXC_FP1(%a6), 0+FP_SRC(%a6)
+	mov.l		4+EXC_FP1(%a6), 4+FP_SRC(%a6)
+	mov.l		8+EXC_FP1(%a6), 8+FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_2:
+	fmovm.x		&0x20, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_3:
+	fmovm.x		&0x10, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_4:
+	fmovm.x		&0x08, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_5:
+	fmovm.x		&0x04, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_6:
+	fmovm.x		&0x02, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+load_fpn1_7:
+	fmovm.x		&0x01, FP_SRC(%a6)
+	lea		FP_SRC(%a6), %a0
+	rts
+
+#############################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	load_fpn2(): load FP register value into FP_DST(a6).		#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	d0 = index of FP register to load				#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_DST(a6) = value loaded from FP register file			#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Using the index in d0, load FP_DST(a6) with a number from the	#
+# FP register file.							#
+#									#
+#########################################################################
+
+	global		load_fpn2
+load_fpn2:
+	mov.w		(tbl_load_fpn2.b,%pc,%d0.w*2), %d0
+	jmp		(tbl_load_fpn2.b,%pc,%d0.w*1)
+
+tbl_load_fpn2:
+	short		load_fpn2_0 - tbl_load_fpn2
+	short		load_fpn2_1 - tbl_load_fpn2
+	short		load_fpn2_2 - tbl_load_fpn2
+	short		load_fpn2_3 - tbl_load_fpn2
+	short		load_fpn2_4 - tbl_load_fpn2
+	short		load_fpn2_5 - tbl_load_fpn2
+	short		load_fpn2_6 - tbl_load_fpn2
+	short		load_fpn2_7 - tbl_load_fpn2
+
+load_fpn2_0:
+	mov.l		0+EXC_FP0(%a6), 0+FP_DST(%a6)
+	mov.l		4+EXC_FP0(%a6), 4+FP_DST(%a6)
+	mov.l		8+EXC_FP0(%a6), 8+FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_1:
+	mov.l		0+EXC_FP1(%a6), 0+FP_DST(%a6)
+	mov.l		4+EXC_FP1(%a6), 4+FP_DST(%a6)
+	mov.l		8+EXC_FP1(%a6), 8+FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_2:
+	fmovm.x		&0x20, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_3:
+	fmovm.x		&0x10, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_4:
+	fmovm.x		&0x08, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_5:
+	fmovm.x		&0x04, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_6:
+	fmovm.x		&0x02, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+load_fpn2_7:
+	fmovm.x		&0x01, FP_DST(%a6)
+	lea		FP_DST(%a6), %a0
+	rts
+
+#############################################################################
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	store_fpreg(): store an fp value to the fpreg designated d0.	#
+#									#
+# XREF ****************************************************************	#
+#	None								#
+#									#
+# INPUT ***************************************************************	#
+#	fp0 = extended precision value to store				#
+#	d0  = index of floating-point register				#
+#									#
+# OUTPUT **************************************************************	#
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Store the value in fp0 to the FP register designated by the	#
+# value in d0. The FP number can be DENORM or SNAN so we have to be	#
+# careful that we don't take an exception here.				#
+#									#
+#########################################################################
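+
+# (fmovm.x is used because it moves the bit image without arithmetic
+#  conversion and so cannot trap on a DENORM or SNAN; fp2-fp7 are reached
+#  by bouncing fp0 through the stack since fmovm only moves between
+#  registers and memory)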
+
+	global		store_fpreg
+store_fpreg:
+	mov.w		(tbl_store_fpreg.b,%pc,%d0.w*2), %d0
+	jmp		(tbl_store_fpreg.b,%pc,%d0.w*1)
+
+tbl_store_fpreg:
+	short		store_fpreg_0 - tbl_store_fpreg
+	short		store_fpreg_1 - tbl_store_fpreg
+	short		store_fpreg_2 - tbl_store_fpreg
+	short		store_fpreg_3 - tbl_store_fpreg
+	short		store_fpreg_4 - tbl_store_fpreg
+	short		store_fpreg_5 - tbl_store_fpreg
+	short		store_fpreg_6 - tbl_store_fpreg
+	short		store_fpreg_7 - tbl_store_fpreg
+
+store_fpreg_0:
+	fmovm.x		&0x80, EXC_FP0(%a6)
+	rts
+store_fpreg_1:
+	fmovm.x		&0x80, EXC_FP1(%a6)
+	rts
+store_fpreg_2:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x20
+	rts
+store_fpreg_3:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x10
+	rts
+store_fpreg_4:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x08
+	rts
+store_fpreg_5:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x04
+	rts
+store_fpreg_6:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x02
+	rts
+store_fpreg_7:
+	fmovm.x		&0x01, -(%sp)
+	fmovm.x		(%sp)+, &0x01
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	get_packed(): fetch a packed operand from memory and then	#
+#		      convert it to a floating-point binary number.	#
+#									#
+# XREF ****************************************************************	#
+#	_dcalc_ea() - calculate the correct <ea>			#
+#	_dmem_read() - fetch the packed operand from memory		#
+#	facc_in_x() - the fetch failed so jump to special exit code	#
+#	decbin()    - convert packed to binary extended precision	#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	If no failure on _mem_read():					#
+#	FP_SRC(a6) = packed operand now as a binary FP number		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Get the correct <ea> which is the value on the exception stack	#
+# frame w/ maybe a correction factor if the <ea> is -(an) or (an)+.	#
+# Then, fetch the operand from memory. If the fetch fails, exit		#
+# through facc_in_x().							#
+#	If the packed operand is a ZERO, NAN, or INF, convert it to	#
+# its binary representation here. Else, call decbin() which will	#
+# convert the packed value to an extended precision binary value.	#
+#									#
+#########################################################################
+
+# the stacked <ea> for packed is correct except for -(An).
+# the base reg must be updated for both -(An) and (An)+.
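+#
+# packed format, as the bitfield extracts here and in decbin() assume:
+#	lw 0: SM(31) SE(30) y(29) y(28) 3-digit bcd exponent(27:16)
+#	      unused(15:4) 1 bcd integer digit(3:0)
+#	lw 1/lw 2: 16 bcd fraction digits, msd first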
+	global		get_packed
+get_packed:
+	mov.l		&0xc,%d0		# packed is 12 bytes
+	bsr.l		_dcalc_ea		# fetch <ea>; correct An
+
+	lea		FP_SRC(%a6),%a1		# pass: ptr to super dst
+	mov.l		&0xc,%d0		# pass: 12 bytes
+	bsr.l		_dmem_read		# read packed operand
+
+	tst.l		%d1			# did dfetch fail?
+	bne.l		facc_in_x		# yes
+
+# The packed operand is an INF or a NAN if the exponent field is all ones.
+	bfextu		FP_SRC(%a6){&1:&15},%d0	# get exp
+	cmpi.w		%d0,&0x7fff		# INF or NAN?
+	bne.b		gp_try_zero		# no
+	rts					# operand is an INF or NAN
+
+# The packed operand is a zero if the mantissa is all zero, else it's
+# a normal packed op.
+gp_try_zero:
+	mov.b		3+FP_SRC(%a6),%d0	# get byte 4
+	andi.b		&0x0f,%d0		# clear all but last nybble
+	bne.b		gp_not_spec		# not a zero
+	tst.l		FP_SRC_HI(%a6)		# is lw 2 zero?
+	bne.b		gp_not_spec		# not a zero
+	tst.l		FP_SRC_LO(%a6)		# is lw 3 zero?
+	bne.b		gp_not_spec		# not a zero
+	rts					# operand is a ZERO
+gp_not_spec:
+	lea		FP_SRC(%a6),%a0		# pass: ptr to packed op
+	bsr.l		decbin			# convert to extended
+	fmovm.x		&0x80,FP_SRC(%a6)	# make this the srcop
+	rts
+
+#########################################################################
+# decbin(): Converts normalized packed bcd value pointed to by register	#
+#	    a0 to extended-precision value in fp0.			#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to normalized packed bcd value			#
+#									#
+# OUTPUT **************************************************************	#
+#	fp0 = exact fp representation of the packed bcd value.		#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Expected is a normal bcd (i.e. non-exceptional; all inf, zero,	#
+#	and NaN operands are dispatched without entering this routine)	#
+#	value in 68881/882 format at location (a0).			#
+#									#
+#	A1. Convert the bcd exponent to binary by successive adds and	#
+#	muls. Set the sign according to SE. Subtract 16 to compensate	#
+#	for the mantissa which is to be interpreted as 17 integer	#
+#	digits, rather than 1 integer and 16 fraction digits.		#
+#	Note: this operation can never overflow.			#
+#									#
+#	A2. Convert the bcd mantissa to binary by successive		#
+#	adds and muls in FP0. Set the sign according to SM.		#
+#	The mantissa digits will be converted with the decimal point	#
+#	assumed following the least-significant digit.			#
+#	Note: this operation can never overflow.			#
+#									#
+#	A3. Count the number of leading/trailing zeros in the		#
+#	bcd string.  If SE is positive, count the leading zeros;	#
+#	if negative, count the trailing zeros.  Set the adjusted	#
+#	exponent equal to the exponent from A1 and the zero count	#
+#	added if SE = 1 and subtracted if SE = 0.  Scale the		#
+#	mantissa the equivalent of forcing in the bcd value:		#
+#									#
+#	SE = 0	a non-zero digit in the integer position		#
+#	SE = 1	a non-zero digit in Mant0, lsd of the fraction		#
+#									#
+#	this will insure that any value, regardless of its		#
+#	representation (ex. 0.1E2, 1E1, 10E0, 100E-1), is converted	#
+#	consistently.							#
+#									#
+#	A4. Calculate the factor 10^exp in FP1 using a table of		#
+#	10^(2^n) values.  To reduce the error in forming factors	#
+#	greater than 10^27, a directed rounding scheme is used with	#
+#	tables rounded to RN, RM, and RP, according to the table	#
+#	in the comments of the pwrten section.				#
+#									#
+#	A5. Form the final binary number by scaling the mantissa by	#
+#	the exponent factor.  This is done by multiplying the		#
+#	mantissa in FP0 by the factor in FP1 if the adjusted		#
+#	exponent sign is positive, and dividing FP0 by FP1 if		#
+#	it is negative.							#
+#									#
+#	Clean up and return. Check if the final mul or div was inexact.	#
+#	If so, set INEX1 in USER_FPSR.					#
+#									#
+#########################################################################
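+
+#
+#	Example: +1.234E+002 arrives as SM = 0, SE = 0, exponent digits
+#	002, mantissa digits 1 2340000000000000. A1 yields 2-16 = -14,
+#	A2 yields the 17-digit integer 12340000000000000, and
+#	12340000000000000 * 10^-14 = 123.4, the exact value.
+#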
+
+#
+#	PTENRN, PTENRM, and PTENRP are arrays of powers of 10 rounded
+#	to nearest, minus, and plus, respectively.  The tables include
+#	10**{1,2,4,8,16,32,64,128,256,512,1024,2048,4096}.  No rounding
+#	is required until the power is greater than 27, however, all
+#	tables include the first 5 for ease of indexing.
+#
+RTABLE:
+	byte		0,0,0,0
+	byte		2,3,2,3
+	byte		2,3,3,2
+	byte		3,2,2,3
+
+	set		FNIBS,7
+	set		FSTRT,0
+
+	set		ESTRT,4
+	set		EDIGITS,2
+
+	global		decbin
+decbin:
+	mov.l		0x0(%a0),FP_SCR0_EX(%a6) # make a copy of input
+	mov.l		0x4(%a0),FP_SCR0_HI(%a6) # so we don't alter it
+	mov.l		0x8(%a0),FP_SCR0_LO(%a6)
+
+	lea		FP_SCR0(%a6),%a0
+
+	movm.l		&0x3c00,-(%sp)		# save d2-d5
+	fmovm.x		&0x1,-(%sp)		# save fp1
+#
+# Calculate exponent:
+#  1. Copy bcd value in memory for use as a working copy.
+#  2. Calculate absolute value of exponent in d1 by mul and add.
+#  3. Correct for exponent sign.
+#  4. Subtract 16 to compensate for interpreting the mant as all integer digits.
+#     (i.e., all digits assumed left of the decimal point.)
+#
+# Register usage:
+#
+#  calc_e:
+#	(*)  d0: temp digit storage
+#	(*)  d1: accumulator for binary exponent
+#	(*)  d2: digit count
+#	(*)  d3: offset pointer
+#	( )  d4: first word of bcd
+#	( )  a0: pointer to working bcd value
+#	( )  a6: pointer to original bcd value
+#	(*)  FP_SCR1: working copy of original bcd value
+#	(*)  L_SCR1: copy of original exponent word
+#
+calc_e:
+	mov.l		&EDIGITS,%d2		# # of exponent digits - 1 (dbf count)
+	mov.l		&ESTRT,%d3		# counter to pick up digits
+	mov.l		(%a0),%d4		# get first word of bcd
+	clr.l		%d1			# zero d1 for accumulator
+e_gd:
+	mulu.l		&0xa,%d1		# mul partial product by one digit place
+	bfextu		%d4{%d3:&4},%d0		# get the digit and zero extend into d0
+	add.l		%d0,%d1			# d1 = d1 + d0
+	addq.b		&4,%d3			# advance d3 to the next digit
+	dbf.w		%d2,e_gd		# if we have used all 3 digits, exit loop
+	btst		&30,%d4			# get SE
+	beq.b		e_pos			# don't negate if pos
+	neg.l		%d1			# negate before subtracting
+e_pos:
+	sub.l		&16,%d1			# sub to compensate for shift of mant
+	bge.b		e_save			# if still pos, do not neg
+	neg.l		%d1			# now negative, make pos and set SE
+	or.l		&0x40000000,%d4		# set SE in d4,
+	or.l		&0x40000000,(%a0)	# and in working bcd
+e_save:
+	mov.l		%d1,-(%sp)		# save exp on stack
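+
+# (the e_gd loop is a Horner evaluation of the 3 bcd exponent digits: for
+#  digits "123", d1 steps 0 -> 1 -> 12 -> 123 before the SE and -16 fixups)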
+#
+#
+# Calculate mantissa:
+#  1. Calculate absolute value of mantissa in fp0 by mul and add.
+#  2. Correct for mantissa sign.
+#     (i.e., all digits assumed left of the decimal point.)
+#
+# Register usage:
+#
+#  calc_m:
+#	(*)  d0: temp digit storage
+#	(*)  d1: lword counter
+#	(*)  d2: digit count
+#	(*)  d3: offset pointer
+#	( )  d4: words 2 and 3 of bcd
+#	( )  a0: pointer to working bcd value
+#	( )  a6: pointer to original bcd value
+#	(*) fp0: mantissa accumulator
+#	( )  FP_SCR1: working copy of original bcd value
+#	( )  L_SCR1: copy of original exponent word
+#
+calc_m:
+	mov.l		&1,%d1			# word counter, init to 1
+	fmov.s		&0x00000000,%fp0	# accumulator
+#
+#
+#  Since the packed number has a long word between the first & second parts,
+#  get the integer digit then skip down & get the rest of the
+#  mantissa.  We will unroll the loop once.
+#
+	bfextu		(%a0){&28:&4},%d0	# integer part is ls digit in long word
+	fadd.b		%d0,%fp0		# add digit to sum in fp0
+#
+#
+#  Get the rest of the mantissa.
+#
+loadlw:
+	mov.l		(%a0,%d1.L*4),%d4	# load mantissa longword into d4
+	mov.l		&FSTRT,%d3		# counter to pick up digits
+	mov.l		&FNIBS,%d2		# reset digit count for this lword
+md2b:
+	fmul.s		&0x41200000,%fp0	# fp0 = fp0 * 10
+	bfextu		%d4{%d3:&4},%d0		# get the digit and zero extend
+	fadd.b		%d0,%fp0		# fp0 = fp0 + digit
+#
+#
+#  If all the digits (8) in that long word have been converted (d2=0),
+#  then inc d1 (=2) to point to the next long word and reset d3 to 0
+#  to initialize the digit offset, and set d2 to 7 for the digit count;
+#  else continue with this long word.
+#
+	addq.b		&4,%d3			# advance d3 to the next digit
+	dbf.w		%d2,md2b		# check for last digit in this lw
+nextlw:
+	addq.l		&1,%d1			# inc lw pointer in mantissa
+	cmp.l		%d1,&2			# test for last lw
+	ble.b		loadlw			# if not, get last one
+#
+#  Check the sign of the mant and make the value in fp0 the same sign.
+#
+m_sign:
+	btst		&31,(%a0)		# test sign of the mantissa
+	beq.b		ap_st_z			# if clear, go to append/strip zeros
+	fneg.x		%fp0			# if set, negate fp0
+#
+# Append/strip zeros:
+#
+#  For adjusted exponents which have an absolute value greater than 27*,
+#  this routine calculates the amount needed to normalize the mantissa
+#  for the adjusted exponent.  That number is subtracted from the exp
+#  if the exp was positive, and added if it was negative.  The purpose
+#  of this is to reduce the value of the exponent and the possibility
+#  of error in calculation of pwrten.
+#
+#  1. Branch on the sign of the adjusted exponent.
+#  2p.(positive exp)
+#   2. Check M16 and the digits in lwords 2 and 3 in descending order.
+#   3. Add one for each zero encountered until a non-zero digit.
+#   4. Subtract the count from the exp.
+#   5. Check if the exp has crossed zero in #3 above; make the exp abs
+#      and set SE.
+#   6. Multiply the mantissa by 10**count.
+#  2n.(negative exp)
+#   2. Check the digits in lwords 3 and 2 in descending order.
+#   3. Add one for each zero encountered until a non-zero digit.
+#   4. Add the count to the exp.
+#   5. Check if the exp has crossed zero in #3 above; clear SE.
+#   6. Divide the mantissa by 10**count.
+#
+#  *Why 27?  If the adjusted exponent is within -28 < expA < 28, then
+#   any adjustment due to append/strip zeros will drive the resultant
+#   exponent towards zero.  Since all pwrten constants with a power
+#   of 27 or less are exact, there is no need to use this routine to
+#   attempt to lessen the resultant exponent.
+#
+# Register usage:
+#
+#  ap_st_z:
+#	(*)  d0: temp digit storage
+#	(*)  d1: zero count
+#	(*)  d2: digit count
+#	(*)  d3: offset pointer
+#	( )  d4: first word of bcd
+#	(*)  d5: lword counter
+#	( )  a0: pointer to working bcd value
+#	( )  FP_SCR1: working copy of original bcd value
+#	( )  L_SCR1: copy of original exponent word
+#
+#
+# First check the absolute value of the exponent to see if this
+# routine is necessary.  If so, then check the sign of the exponent
+# and do append (+) or strip (-) zeros accordingly.
+# This section handles a positive adjusted exponent.
+#
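+# In outline (a sketch: expA is the stored exponent magnitude, se its
+# sign flag; the zero-digit counts are assumed helpers and pow10i()
+# is sketched after ap_p_fm below):
+#
+#	if (!se) {                        /* positive adjusted exponent */
+#	    n = leading_zero_digits();    /* M16 downward */
+#	    expA -= n;                    /* crossed zero? negate, set SE */
+#	    m *= pow10i(n);
+#	} else {                          /* negative adjusted exponent */
+#	    n = trailing_zero_digits();   /* ls digit upward */
+#	    expA -= n;                    /* crossed zero? negate, clr SE */
+#	    m /= pow10i(n);
+#	}
+#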
+ap_st_z:
+	mov.l		(%sp),%d1		# load expA for range test
+	cmp.l		%d1,&27			# compare abs(expA) with 27
+	ble.w		pwrten			# if abs(expA) <28, skip ap/st zeros
+	btst		&30,(%a0)		# check sign of exp
+	bne.b		ap_st_n			# if neg, go to neg side
+	clr.l		%d1			# zero count reg
+	mov.l		(%a0),%d4		# load lword 1 to d4
+	bfextu		%d4{&28:&4},%d0		# get M16 in d0
+	bne.b		ap_p_fx			# if M16 is non-zero, go fix exp
+	addq.l		&1,%d1			# inc zero count
+	mov.l		&1,%d5			# init lword counter
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 2 to d4
+	bne.b		ap_p_cl			# if lw 2 is non-zero, check its digits
+	addq.l		&8,%d1			# and inc count by 8
+	addq.l		&1,%d5			# inc lword counter
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 3 to d4
+ap_p_cl:
+	clr.l		%d3			# init offset reg
+	mov.l		&7,%d2			# init digit counter
+ap_p_gd:
+	bfextu		%d4{%d3:&4},%d0		# get digit
+	bne.b		ap_p_fx			# if non-zero, go to fix exp
+	addq.l		&4,%d3			# point to next digit
+	addq.l		&1,%d1			# inc digit counter
+	dbf.w		%d2,ap_p_gd		# get next digit
+ap_p_fx:
+	mov.l		%d1,%d0			# copy counter to d0
+	mov.l		(%sp),%d1		# get adjusted exp from memory
+	sub.l		%d0,%d1			# subtract count from exp
+	bge.b		ap_p_fm			# if still pos, go to pwrten
+	neg.l		%d1			# now it's neg; get abs
+	mov.l		(%a0),%d4		# load lword 1 to d4
+	or.l		&0x40000000,%d4		# and set SE in d4
+	or.l		&0x40000000,(%a0)	# and in memory
+#
+# Calculate the mantissa multiplier to compensate for the stripping of
+# zeros from the mantissa.
+#
+ap_p_fm:
+	lea.l		PTENRN(%pc),%a1		# get address of power-of-ten table
+	clr.l		%d3			# init table index
+	fmov.s		&0x3f800000,%fp1	# init fp1 to 1
+	mov.l		&3,%d2			# init d2 to count bits in counter
+ap_p_el:
+	asr.l		&1,%d0			# shift lsb into carry
+	bcc.b		ap_p_en			# if bit is zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1		# mul by 10**(d3_bit_no)
+ap_p_en:
+	add.l		&12,%d3			# inc d3 to next pwrten table entry
+	tst.l		%d0			# check if d0 is zero
+	bne.b		ap_p_el			# if not, get next bit
+	fmul.x		%fp1,%fp0		# mul mantissa by 10**(no_bits_shifted)
+	bra.b		pwrten			# go calc pwrten
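+
+#
+# The ap_p_el loop above (and ap_n_el/e_loop below) is binary
+# exponentiation over a table of 10^(2^k) entries, 12 bytes each in
+# the PTEN tables.  A self-contained C sketch of the same idea
+# (truncated to the powers a double can hold):
+#
+#	static const double pow10tab[] = {
+#	    1e1, 1e2, 1e4, 1e8, 1e16, 1e32, 1e64, 1e128, 1e256
+#	};
+#
+#	double pow10i(unsigned int n)    /* 10^n, square-and-multiply */
+#	{
+#	    double r = 1.0;
+#	    int k;
+#
+#	    for (k = 0; n != 0; n >>= 1, k++)
+#	        if (n & 1)
+#	            r *= pow10tab[k];    /* fmul.x (%a1,%d3),%fp1 */
+#	    return r;
+#	}
+#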
+#
+# This section handles a negative adjusted exponent.
+#
+ap_st_n:
+	clr.l		%d1			# clr counter
+	mov.l		&2,%d5			# set up d5 to point to lword 3
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 3
+	bne.b		ap_n_cl			# if not zero, check digits
+	sub.l		&1,%d5			# dec d5 to point to lword 2
+	addq.l		&8,%d1			# inc counter by 8
+	mov.l		(%a0,%d5.L*4),%d4	# get lword 2
+ap_n_cl:
+	mov.l		&28,%d3			# point to last digit
+	mov.l		&7,%d2			# init digit counter
+ap_n_gd:
+	bfextu		%d4{%d3:&4},%d0		# get digit
+	bne.b		ap_n_fx			# if non-zero, go to exp fix
+	subq.l		&4,%d3			# point to previous digit
+	addq.l		&1,%d1			# inc digit counter
+	dbf.w		%d2,ap_n_gd		# get next digit
+ap_n_fx:
+	mov.l		%d1,%d0			# copy counter to d0
+	mov.l		(%sp),%d1		# get adjusted exp from memory
+	sub.l		%d0,%d1			# subtract count from exp
+	bgt.b		ap_n_fm			# if still pos, go fix mantissa
+	neg.l		%d1			# take abs of exp and clr SE
+	mov.l		(%a0),%d4		# load lword 1 to d4
+	and.l		&0xbfffffff,%d4		# and clr SE in d4
+	and.l		&0xbfffffff,(%a0)	# and in memory
+#
+# Calculate the mantissa multiplier to compensate for the appending of
+# zeros to the mantissa.
+#
+ap_n_fm:
+	lea.l		PTENRN(%pc),%a1		# get address of power-of-ten table
+	clr.l		%d3			# init table index
+	fmov.s		&0x3f800000,%fp1	# init fp1 to 1
+	mov.l		&3,%d2			# init d2 to count bits in counter
+ap_n_el:
+	asr.l		&1,%d0			# shift lsb into carry
+	bcc.b		ap_n_en			# if bit is zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1		# mul by 10**(d3_bit_no)
+ap_n_en:
+	add.l		&12,%d3			# inc d3 to next pwrten table entry
+	tst.l		%d0			# check if d0 is zero
+	bne.b		ap_n_el			# if not, get next bit
+	fdiv.x		%fp1,%fp0		# div mantissa by 10**(no_bits_shifted)
+#
+#
+# Calculate power-of-ten factor from adjusted and shifted exponent.
+#
+# Register usage:
+#
+#  pwrten:
+#	(*)  d0: temp
+#	( )  d1: exponent
+#	(*)  d2: {FPCR[6:5],SM,SE} as index in RTABLE; temp
+#	(*)  d3: FPCR work copy
+#	( )  d4: first word of bcd
+#	(*)  a1: RTABLE pointer
+#  calc_p:
+#	(*)  d0: temp
+#	( )  d1: exponent
+#	(*)  d3: PWRTxx table index
+#	( )  a0: pointer to working copy of bcd
+#	(*)  a1: PWRTxx pointer
+#	(*) fp1: power-of-ten accumulator
+#
+# Pwrten calculates the exponent factor in the selected rounding mode
+# according to the following table:
+#
+#	Sign of Mant  Sign of Exp  Rounding Mode  PWRTEN Rounding Mode
+#
+#	ANY	  ANY	RN	RN
+#
+#	 +	   +	RP	RP
+#	 -	   +	RP	RM
+#	 +	   -	RP	RM
+#	 -	   -	RP	RP
+#
+#	 +	   +	RM	RM
+#	 -	   +	RM	RP
+#	 +	   -	RM	RP
+#	 -	   -	RM	RM
+#
+#	 +	   +	RZ	RM
+#	 -	   +	RZ	RM
+#	 +	   -	RZ	RP
+#	 -	   -	RZ	RP
+#
+#
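+# One way to read the RTABLE lookup done below, as a C sketch
+# (rmode = FPCR[6:5], sm/se = mantissa/exponent sign bits; entries
+# encode 0 = RN, 2 = RM, 3 = RP, matching the table above):
+#
+#	static const unsigned char rtable[16] = {
+#	    0, 0, 0, 0,          /* RN rows: always RN */
+#	    2, 3, 2, 3,          /* RZ rows */
+#	    2, 3, 3, 2,          /* RM rows */
+#	    3, 2, 2, 3           /* RP rows */
+#	};
+#
+#	unsigned int pick = rtable[(rmode << 2) | (sm << 1) | se];
+#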
+pwrten:
+	mov.l		USER_FPCR(%a6),%d3	# get user's FPCR
+	bfextu		%d3{&26:&2},%d2		# isolate rounding mode bits
+	mov.l		(%a0),%d4		# reload 1st bcd word to d4
+	asl.l		&2,%d2			# format d2 to be
+	bfextu		%d4{&0:&2},%d0		# {FPCR[6],FPCR[5],SM,SE}
+	add.l		%d0,%d2			# in d2 as index into RTABLE
+	lea.l		RTABLE(%pc),%a1		# load rtable base
+	mov.b		(%a1,%d2),%d0		# load new rounding bits from table
+	clr.l		%d3			# clear d3 to force no exc and extended
+	bfins		%d0,%d3{&26:&2}		# stuff new rounding bits in FPCR
+	fmov.l		%d3,%fpcr		# write new FPCR
+	asr.l		&1,%d0			# write correct PTENxx table
+	bcc.b		not_rp			# to a1
+	lea.l		PTENRP(%pc),%a1		# it is RP
+	bra.b		calc_p			# go to init section
+not_rp:
+	asr.l		&1,%d0			# keep checking
+	bcc.b		not_rm
+	lea.l		PTENRM(%pc),%a1		# it is RM
+	bra.b		calc_p			# go to init section
+not_rm:
+	lea.l		PTENRN(%pc),%a1		# it is RN
+calc_p:
+	mov.l		%d1,%d0			# copy exp to d0;use d0
+	bpl.b		no_neg			# if exp is negative,
+	neg.l		%d0			# invert it
+	or.l		&0x40000000,(%a0)	# and set SE bit
+no_neg:
+	clr.l		%d3			# table index
+	fmov.s		&0x3f800000,%fp1	# init fp1 to 1
+e_loop:
+	asr.l		&1,%d0			# shift next bit into carry
+	bcc.b		e_next			# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1		# mul by 10**(d3_bit_no)
+e_next:
+	add.l		&12,%d3			# inc d3 to next pwrten table entry
+	tst.l		%d0			# check if d0 is zero
+	bne.b		e_loop			# not zero, continue shifting
+#
+#
+#  Check the sign of the adjusted exp and make the value in fp0 the
+#  same sign. If the exp was pos then multiply fp1*fp0;
+#  else divide fp0/fp1.
+#
+# Register Usage:
+#  norm:
+#	( )  a0: pointer to working bcd value
+#	(*) fp0: mantissa accumulator
+#	( ) fp1: scaling factor - 10**(abs(exp))
+#
+pnorm:
+	btst		&30,(%a0)		# test the sign of the exponent
+	beq.b		mul			# if clear, go to multiply
+div:
+	fdiv.x		%fp1,%fp0		# exp is negative, so divide mant by 10^abs(exp)
+	bra.b		end_dec
+mul:
+	fmul.x		%fp1,%fp0		# exp is positive, so multiply mant by 10^exp
+#
+#
+# Clean up and return with result in fp0.
+#
+# If the final mul/div in decbin incurred an inex exception,
+# it will be inex2, but will be reported as inex1 by get_op.
+#
+end_dec:
+	fmov.l		%fpsr,%d0		# get status register
+	bclr		&inex2_bit+8,%d0	# test for inex2 and clear it
+	beq.b		no_exc			# skip this if no exc
+	ori.w		&inx1a_mask,2+USER_FPSR(%a6) # set INEX1/AINEX
+no_exc:
+	add.l		&0x4,%sp		# clear 1 lw param
+	fmovm.x		(%sp)+,&0x40		# restore fp1
+	movm.l		(%sp)+,&0x3c		# restore d2-d5
+	fmov.l		&0x0,%fpcr
+	fmov.l		&0x0,%fpsr
+	rts
+
+#########################################################################
+# bindec(): Converts an input in extended precision format to bcd format#
+#									#
+# INPUT ***************************************************************	#
+#	a0 = pointer to the input extended precision value in memory.	#
+#	     the input may be either normalized, unnormalized, or	#
+#	     denormalized.						#
+#	d0 = contains the k-factor sign-extended to 32-bits.		#
+#									#
+# OUTPUT **************************************************************	#
+#	FP_SCR0(a6) = bcd format result on the stack.			#
+#									#
+# ALGORITHM ***********************************************************	#
+#									#
+#	A1.	Set RM and size ext;  Set SIGMA = sign of input.	#
+#		The k-factor is saved for use in d7. Clear the		#
+#		BINDEC_FLG for separating normalized/denormalized	#
+#		input.  If input is unnormalized or denormalized,	#
+#		normalize it.						#
+#									#
+#	A2.	Set X = abs(input).					#
+#									#
+#	A3.	Compute ILOG.						#
+#		ILOG is the log base 10 of the input value.  It is	#
+#		approximated by adding e + 0.f when the original	#
+#		value is viewed as 2^^e * 1.f in extended precision.	#
+#		This value is stored in d6.				#
+#									#
+#	A4.	Clr INEX bit.						#
+#		The operation in A3 above may have set INEX2.		#
+#									#
+#	A5.	Set ICTR = 0;						#
+#		ICTR is a flag used in A13.  It must be set before the	#
+#		loop entry A6.						#
+#									#
+#	A6.	Calculate LEN.						#
+#		LEN is the number of digits to be displayed.  The	#
+#		k-factor can dictate either the total number of digits,	#
+#		if it is a positive number, or the number of digits	#
+#		after the decimal point which are to be included as	#
+#		significant.  See the 68882 manual for examples.	#
+#		If LEN is computed to be greater than 17, set OPERR in	#
+#		USER_FPSR.  LEN is stored in d4.			#
+#									#
+#	A7.	Calculate SCALE.					#
+#		SCALE is equal to 10^ISCALE, where ISCALE is the number	#
+#		of decimal places needed to insure LEN integer digits	#
+#		in the output before conversion to bcd. LAMBDA is the	#
+#		sign of ISCALE, used in A9. Fp1 contains		#
+#		10^^(abs(ISCALE)) using a rounding mode which is a	#
+#		function of the original rounding mode and the signs	#
+#		of ISCALE and X.  A table is given in the code.		#
+#									#
+#	A8.	Clr INEX; Force RZ.					#
+#		The operation in A3 above may have set INEX2.		#
+#		RZ mode is forced for the scaling operation to insure	#
+#		only one rounding error.  The grs bits are collected in #
+#		the INEX flag for use in A10.				#
+#									#
+#	A9.	Scale X -> Y.						#
+#		The mantissa is scaled to the desired number of		#
+#		significant digits.  The excess digits are collected	#
+#		in INEX2.						#
+#									#
+#	A10.	Or in INEX.						#
+#		If INEX is set, round error occurred.  This is		#
+#		compensated for by 'or-ing' in the INEX2 flag to	#
+#		the lsb of Y.						#
+#									#
+#	A11.	Restore original FPCR; set size ext.			#
+#		Perform FINT operation in the user's rounding mode.	#
+#		Keep the size to extended.				#
+#									#
+#	A12.	Calculate YINT = FINT(Y) according to user's rounding	#
+#		mode.  The FPSP routine sintd0 is used.  The output	#
+#		is in fp0.						#
+#									#
+#	A13.	Check for LEN digits.					#
+#		If the int operation results in more than LEN digits,	#
+#		or less than LEN -1 digits, adjust ILOG and repeat from	#
+#		A6.  This test occurs only on the first pass.  If the	#
+#		result is exactly 10^LEN, decrement ILOG and divide	#
+#		the mantissa by 10.					#
+#									#
+#	A14.	Convert the mantissa to bcd.				#
+#		The binstr routine is used to convert the LEN digit	#
+#		mantissa to bcd in memory.  The input to binstr is	#
+#		to be a fraction; i.e. (mantissa)/10^LEN and adjusted	#
+#		such that the decimal point is to the left of bit 63.	#
+#		The bcd digits are stored in the correct position in	#
+#		the final string area in memory.			#
+#									#
+#	A15.	Convert the exponent to bcd.				#
+#		As in A14 above, the exp is converted to bcd and the	#
+#		digits are stored in the final string.			#
+#		Test the length of the final exponent string.  If the	#
+#		length is 4, set operr.					#
+#									#
+#	A16.	Write sign bits to final string.			#
+#									#
+#########################################################################
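+
+# A very rough C shape of the steps above (a sketch: floor/log10,
+# pow(), and rint() stand in for the ILOG estimate, the table-driven
+# scaling, and FINT; rounding-mode and denorm handling are omitted):
+#
+#	#include <math.h>
+#
+#	void bindec_sketch(double x, int k /* k-factor */)
+#	{
+#	    int ilog = (int)floor(log10(fabs(x)));      /* A3 */
+#	    int len = (k > 0) ? k : ilog + 1 - k;       /* A6 */
+#	    if (len > 17)
+#	        len = 17;                               /* and set OPERR */
+#	    int iscale = ilog + 1 - len;                /* A7 */
+#	    double y = fabs(x) / pow(10.0, iscale);     /* A9: len digits */
+#	    double yint = rint(y);                      /* A12 */
+#	    (void)yint;
+#	    /* A13: adjust ilog and repeat once if yint came out with
+#	       too many or too few digits */
+#	    /* A14/A15/A16: emit yint, ilog, and the signs as bcd */
+#	}
+#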
+
+set	BINDEC_FLG,	EXC_TEMP	# DENORM flag
+
+# Constants in extended precision
+PLOG2:
+	long		0x3FFD0000,0x9A209A84,0xFBCFF798,0x00000000
+PLOG2UP1:
+	long		0x3FFD0000,0x9A209A84,0xFBCFF799,0x00000000
+
+# Constants in single precision
+FONE:
+	long		0x3F800000,0x00000000,0x00000000,0x00000000
+FTWO:
+	long		0x40000000,0x00000000,0x00000000,0x00000000
+FTEN:
+	long		0x41200000,0x00000000,0x00000000,0x00000000
+F4933:
+	long		0x459A2800,0x00000000,0x00000000,0x00000000
+
+RBDTBL:
+	byte		0,0,0,0
+	byte		3,3,2,2
+	byte		3,2,2,3
+	byte		2,3,3,2
+
+#	Implementation Notes:
+#
+#	The registers are used as follows:
+#
+#		d0: scratch; LEN input to binstr
+#		d1: scratch
+#		d2: upper 32-bits of mantissa for binstr
+#		d3: scratch;lower 32-bits of mantissa for binstr
+#		d4: LEN
+#		d5: LAMBDA/ICTR
+#		d6: ILOG
+#		d7: k-factor
+#		a0: ptr for original operand/final result
+#		a1: scratch pointer
+#		a2: pointer to FP_X; abs(original value) in ext
+#		fp0: scratch
+#		fp1: scratch
+#		fp2: scratch
+#		F_SCR1:
+#		F_SCR2:
+#		L_SCR1:
+#		L_SCR2:
+
+	global		bindec
+bindec:
+	movm.l		&0x3f20,-(%sp)	#  {%d2-%d7/%a2}
+	fmovm.x		&0x7,-(%sp)	#  {%fp0-%fp2}
+
+# A1. Set RM and size ext. Set SIGMA = sign input;
+#     The k-factor is saved for use in d7.  Clear BINDEC_FLG for
+#     separating  normalized/denormalized input.  If the input
+#     is a denormalized number, set the BINDEC_FLG memory word
+#     to signal denorm.  If the input is unnormalized, normalize
+#     the input and test for denormalized result.
+#
+	fmov.l		&rm_mode*0x10,%fpcr	# set RM and ext
+	mov.l		(%a0),L_SCR2(%a6)	# save exponent for sign check
+	mov.l		%d0,%d7		# move k-factor to d7
+
+	clr.b		BINDEC_FLG(%a6)	# clr norm/denorm flag
+	cmpi.b		STAG(%a6),&DENORM # is input a DENORM?
+	bne.w		A2_str		# no; input is a NORM
+
+#
+# Normalize the denorm
+#
+un_de_norm:
+	mov.w		(%a0),%d0
+	and.w		&0x7fff,%d0	# strip sign of normalized exp
+	mov.l		4(%a0),%d1
+	mov.l		8(%a0),%d2
+norm_loop:
+	sub.w		&1,%d0
+	lsl.l		&1,%d2
+	roxl.l		&1,%d1
+	tst.l		%d1
+	bge.b		norm_loop
+#
+# Test if the normalized input is denormalized
+#
+	tst.w		%d0
+	bgt.b		pos_exp		# if greater than zero, it is a norm
+	st		BINDEC_FLG(%a6)	# set flag for denorm
+pos_exp:
+	and.w		&0x7fff,%d0	# strip sign of normalized exp
+	mov.w		%d0,(%a0)
+	mov.l		%d1,4(%a0)
+	mov.l		%d2,8(%a0)
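+
+# norm_loop above, in C terms (a sketch; hi:lo are the two mantissa
+# lwords, exp the biased exponent; the assembly shifts at least once
+# since the operand is known unnormalized):
+#
+#	while (!(hi & 0x80000000u)) {        /* until the msb is set */
+#	    hi = (hi << 1) | (lo >> 31);     /* lsl.l/roxl.l pair */
+#	    lo <<= 1;
+#	    exp--;
+#	}
+#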
+
+# A2. Set X = abs(input).
+#
+A2_str:
+	mov.l		(%a0),FP_SCR1(%a6)	# move input to work space
+	mov.l		4(%a0),FP_SCR1+4(%a6)	# move input to work space
+	mov.l		8(%a0),FP_SCR1+8(%a6)	# move input to work space
+	and.l		&0x7fffffff,FP_SCR1(%a6)	# create abs(X)
+
+# A3. Compute ILOG.
+#     ILOG is the log base 10 of the input value.  It is approx-
+#     imated by adding e + 0.f when the original value is viewed
+#     as 2^^e * 1.f in extended precision.  This value is stored
+#     in d6.
+#
+# Register usage:
+#	Input/Output
+#	d0: k-factor/exponent
+#	d2: x/x
+#	d3: x/x
+#	d4: x/x
+#	d5: x/x
+#	d6: x/ILOG
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: x/x
+#	a2: x/x
+#	fp0: x/float(ILOG)
+#	fp1: x/x
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X)/Abs(X) with $3fff exponent
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
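+#
+# The estimate in C terms (a sketch: floor() stands in for the RM
+# conversion done by fmov.l; the code picks PLOG2 or PLOG2UP1 --
+# log10(2) rounded down or up -- by the sign of e + 0.f so the
+# truncation always errs the same way):
+#
+#	#include <math.h>
+#
+#	int ilog_sketch(int e, double f)     /* x = 2^e * (1 + f) */
+#	{
+#	    const double log10_2 = 0.30102999566398119;
+#	    return (int)floor(((double)e + f) * log10_2);
+#	}
+#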
+
+	tst.b		BINDEC_FLG(%a6)	# check for denorm
+	beq.b		A3_cont		# if clr, continue with norm
+	mov.l		&-4933,%d6	# force ILOG = -4933
+	bra.b		A4_str
+A3_cont:
+	mov.w		FP_SCR1(%a6),%d0	# move exp to d0
+	mov.w		&0x3fff,FP_SCR1(%a6)	# replace exponent with 0x3fff
+	fmov.x		FP_SCR1(%a6),%fp0	# now fp0 has 1.f
+	sub.w		&0x3fff,%d0	# strip off bias
+	fadd.w		%d0,%fp0	# add in exp
+	fsub.s		FONE(%pc),%fp0	# subtract off 1.0
+	fbge.w		pos_res		# if pos, branch
+	fmul.x		PLOG2UP1(%pc),%fp0	# if neg, mul by LOG2UP1
+	fmov.l		%fp0,%d6	# put ILOG in d6 as a lword
+	bra.b		A4_str		# go move out ILOG
+pos_res:
+	fmul.x		PLOG2(%pc),%fp0	# if pos, mul by LOG2
+	fmov.l		%fp0,%d6	# put ILOG in d6 as a lword
+
+
+# A4. Clr INEX bit.
+#     The operation in A3 above may have set INEX2.
+
+A4_str:
+	fmov.l		&0,%fpsr	# zero all of fpsr - nothing needed
+
+
+# A5. Set ICTR = 0;
+#     ICTR is a flag used in A13.  It must be set before the
+#     loop entry A6. The lower word of d5 is used for ICTR.
+
+	clr.w		%d5		# clear ICTR
+
+# A6. Calculate LEN.
+#     LEN is the number of digits to be displayed.  The k-factor
+#     can dictate either the total number of digits, if it is
+#     a positive number, or the number of digits after the
+#     original decimal point which are to be included as
+#     significant.  See the 68882 manual for examples.
+#     If LEN is computed to be greater than 17, set OPERR in
+#     USER_FPSR.  LEN is stored in d4.
+#
+# Register usage:
+#	Input/Output
+#	d0: exponent/Unchanged
+#	d2: x/x/scratch
+#	d3: x/x
+#	d4: exc picture/LEN
+#	d5: ICTR/Unchanged
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: x/x
+#	a2: x/x
+#	fp0: float(ILOG)/Unchanged
+#	fp1: x/x
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
+
+A6_str:
+	tst.l		%d7		# branch on sign of k
+	ble.b		k_neg		# if k <= 0, LEN = ILOG + 1 - k
+	mov.l		%d7,%d4		# if k > 0, LEN = k
+	bra.b		len_ck		# skip to LEN check
+k_neg:
+	mov.l		%d6,%d4		# first load ILOG to d4
+	sub.l		%d7,%d4		# subtract off k
+	addq.l		&1,%d4		# add in the 1
+len_ck:
+	tst.l		%d4		# LEN check: branch on sign of LEN
+	ble.b		LEN_ng		# if neg, set LEN = 1
+	cmp.l		%d4,&17		# test if LEN > 17
+	ble.b		A7_str		# if not, forget it
+	mov.l		&17,%d4		# set max LEN = 17
+	tst.l		%d7		# if negative, never set OPERR
+	ble.b		A7_str		# if positive, continue
+	or.l		&opaop_mask,USER_FPSR(%a6)	# set OPERR & AIOP in USER_FPSR
+	bra.b		A7_str		# finished here
+LEN_ng:
+	mov.l		&1,%d4		# min LEN is 1
+
+
+# A7. Calculate SCALE.
+#     SCALE is equal to 10^ISCALE, where ISCALE is the number
+#     of decimal places needed to insure LEN integer digits
+#     in the output before conversion to bcd. LAMBDA is the sign
+#     of ISCALE, used in A9.  Fp1 contains 10^^(abs(ISCALE)) using
+#     the rounding mode as given in the following table (see
+#     Coonen, p. 7.23 as ref.; however, the SCALE variable is
+#     of opposite sign in bindec.sa from Coonen).
+#
+#	Initial					USE
+#	FPCR[6:5]	LAMBDA	SIGN(X)		FPCR[6:5]
+#	----------------------------------------------
+#	 RN	00	   0	   0		00/0	RN
+#	 RN	00	   0	   1		00/0	RN
+#	 RN	00	   1	   0		00/0	RN
+#	 RN	00	   1	   1		00/0	RN
+#	 RZ	01	   0	   0		11/3	RP
+#	 RZ	01	   0	   1		11/3	RP
+#	 RZ	01	   1	   0		10/2	RM
+#	 RZ	01	   1	   1		10/2	RM
+#	 RM	10	   0	   0		11/3	RP
+#	 RM	10	   0	   1		10/2	RM
+#	 RM	10	   1	   0		10/2	RM
+#	 RM	10	   1	   1		11/3	RP
+#	 RP	11	   0	   0		10/2	RM
+#	 RP	11	   0	   1		11/3	RP
+#	 RP	11	   1	   0		11/3	RP
+#	 RP	11	   1	   1		10/2	RM
+#
+# Register usage:
+#	Input/Output
+#	d0: exponent/scratch - final is 0
+#	d2: x/0 or 24 for A9
+#	d3: x/scratch - offset ptr into PTENRM array
+#	d4: LEN/Unchanged
+#	d5: 0/ICTR:LAMBDA
+#	d6: ILOG/ILOG or k if ((k<=0)&(ILOG<k))
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: x/ptr to PTENRM array
+#	a2: x/x
+#	fp0: float(ILOG)/Unchanged
+#	fp1: x/10^ISCALE
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
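+#
+# The remap in the table above, as a C sketch (lambda = sign of
+# ISCALE, sx = sign of X; entries encode 0 = RN, 2 = RM, 3 = RP,
+# exactly the RBDTBL bytes):
+#
+#	static const unsigned char rbdtbl[16] = {
+#	    0, 0, 0, 0,          /* RN rows: always RN */
+#	    3, 3, 2, 2,          /* RZ rows */
+#	    3, 2, 2, 3,          /* RM rows */
+#	    2, 3, 3, 2           /* RP rows */
+#	};
+#
+#	unsigned int use = rbdtbl[(rmode << 2) | (lambda << 1) | sx];
+#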
+
+A7_str:
+	tst.l		%d7		# test sign of k
+	bgt.b		k_pos		# if k > 0, skip this
+	cmp.l		%d7,%d6		# test k - ILOG
+	blt.b		k_pos		# if ILOG >= k, skip this
+	mov.l		%d7,%d6		# if ((k<0) & (ILOG < k)) ILOG = k
+k_pos:
+	mov.l		%d6,%d0		# calc ILOG + 1 - LEN in d0
+	addq.l		&1,%d0		# add the 1
+	sub.l		%d4,%d0		# sub off LEN
+	swap		%d5		# use upper word of d5 for LAMBDA
+	clr.w		%d5		# set it zero initially
+	clr.w		%d2		# set up d2 for very small case
+	tst.l		%d0		# test sign of ISCALE
+	bge.b		iscale		# if pos, skip next inst
+	addq.w		&1,%d5		# if neg, set LAMBDA true
+	cmp.l		%d0,&0xffffecd4	# test iscale <= -4908
+	bgt.b		no_inf		# if false, skip rest
+	add.l		&24,%d0		# add in 24 to iscale
+	mov.l		&24,%d2		# put 24 in d2 for A9
+no_inf:
+	neg.l		%d0		# and take abs of ISCALE
+iscale:
+	fmov.s		FONE(%pc),%fp1	# init fp1 to 1
+	bfextu		USER_FPCR(%a6){&26:&2},%d1	# get initial rmode bits
+	lsl.w		&1,%d1		# put them in bits 2:1
+	add.w		%d5,%d1		# add in LAMBDA
+	lsl.w		&1,%d1		# put them in bits 3:1
+	tst.l		L_SCR2(%a6)	# test sign of original x
+	bge.b		x_pos		# if pos, don't set bit 0
+	addq.l		&1,%d1		# if neg, set bit 0
+x_pos:
+	lea.l		RBDTBL(%pc),%a2	# load rbdtbl base
+	mov.b		(%a2,%d1),%d3	# load d3 with new rmode
+	lsl.l		&4,%d3		# put bits in proper position
+	fmov.l		%d3,%fpcr	# load bits into fpu
+	lsr.l		&4,%d3		# put bits in proper position
+	tst.b		%d3		# decode new rmode for pten table
+	bne.b		not_rn		# if zero, it is RN
+	lea.l		PTENRN(%pc),%a1	# load a1 with RN table base
+	bra.b		rmode		# exit decode
+not_rn:
+	lsr.b		&1,%d3		# get lsb in carry
+	bcc.b		not_rp2		# if carry clear, it is RM
+	lea.l		PTENRP(%pc),%a1	# load a1 with RP table base
+	bra.b		rmode		# exit decode
+not_rp2:
+	lea.l		PTENRM(%pc),%a1	# load a1 with RM table base
+rmode:
+	clr.l		%d3		# clr table index
+e_loop2:
+	lsr.l		&1,%d0		# shift next bit into carry
+	bcc.b		e_next2		# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp1	# mul by 10**(d3_bit_no)
+e_next2:
+	add.l		&12,%d3		# inc d3 to next pwrten table entry
+	tst.l		%d0		# test if ISCALE is zero
+	bne.b		e_loop2		# if not, loop
+
+# A8. Clr INEX; Force RZ.
+#     The operation in A3 above may have set INEX2.
+#     RZ mode is forced for the scaling operation to insure
+#     only one rounding error.  The grs bits are collected in
+#     the INEX flag for use in A10.
+#
+# Register usage:
+#	Input/Output
+
+	fmov.l		&0,%fpsr	# clr INEX
+	fmov.l		&rz_mode*0x10,%fpcr	# set RZ rounding mode
+
+# A9. Scale X -> Y.
+#     The mantissa is scaled to the desired number of significant
+#     digits.  The excess digits are collected in INEX2.  If scaling
+#     by mul, check d2 for an excess power-of-ten value.  If it is
+#     non-zero, the iscale value would have made the pwrten
+#     calculation overflow.  Only a negative iscale can cause this,
+#     so multiply by 10^(d2), which here can only be 24, using one
+#     multiply by 10^8 and one by 10^16; this is exact, since
+#     10^24 is exact.  If the input was denormalized, we must
+#     create a busy stack frame with the mul command and the
+#     two operands, and allow the fpu to complete the multiply.
+#
+# Register usage:
+#	Input/Output
+#	d0: FPCR with RZ mode/Unchanged
+#	d2: 0 or 24/unchanged
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: ptr to PTENRM array/Unchanged
+#	a2: x/x
+#	fp0: float(ILOG)/X adjusted for SCALE (Y)
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Abs(X) with $3fff exponent/Unchanged
+#	L_SCR1:x/x
+#	L_SCR2:first word of X packed/Unchanged
+
+A9_str:
+	fmov.x		(%a0),%fp0	# load X from memory
+	fabs.x		%fp0		# use abs(X)
+	tst.w		%d5		# LAMBDA is in lower word of d5
+	bne.b		sc_mul		# if neg (LAMBDA = 1), scale by mul
+	fdiv.x		%fp1,%fp0	# calculate X / SCALE -> Y to fp0
+	bra.w		A10_st		# branch to A10
+
+sc_mul:
+	tst.b		BINDEC_FLG(%a6)	# check for denorm
+	beq.w		A9_norm		# if norm, continue with mul
+
+# for DENORM, we must calculate:
+#	fp0 = input_op * 10^ISCALE * 10^24
+# since the input operand is a DENORM, we can't multiply it directly.
+# so, we do the multiplication of the exponents and mantissas separately.
+# in this way, we avoid underflow on intermediate stages of the
+# multiplication and guarantee a result without exception.
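+# loosely, in C (a sketch only: frexp/ldexp stand in for the exponent
+# surgery done on the stack below; denorm_val and scale are the two
+# operands):
+#
+#	#include <math.h>
+#
+#	int e1, e2;
+#	double m1 = frexp(denorm_val, &e1);  /* mantissa in [0.5, 1) */
+#	double m2 = frexp(scale, &e2);       /* 10^ISCALE * 10^24 */
+#	double y = ldexp(m1 * m2, e1 + e2);  /* apply exponent once */
+#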
+	fmovm.x		&0x2,-(%sp)	# save 10^ISCALE to stack
+
+	mov.w		(%sp),%d3	# grab exponent
+	andi.w		&0x7fff,%d3	# clear sign
+	ori.w		&0x8000,(%a0)	# make DENORM exp negative
+	add.w		(%a0),%d3	# add DENORM exp to 10^ISCALE exp
+	subi.w		&0x3fff,%d3	# subtract BIAS
+	add.w		36(%a1),%d3	# add exponent of 10^8
+	subi.w		&0x3fff,%d3	# subtract BIAS
+	add.w		48(%a1),%d3	# add exponent of 10^16
+	subi.w		&0x3fff,%d3	# subtract BIAS
+
+	bmi.w		sc_mul_err	# if result is DENORM, punt!!!
+
+	andi.w		&0x8000,(%sp)	# keep sign
+	or.w		%d3,(%sp)	# insert new exponent
+	andi.w		&0x7fff,(%a0)	# clear sign bit on DENORM again
+	mov.l		0x8(%a0),-(%sp) # put input op mantissa on stk
+	mov.l		0x4(%a0),-(%sp)
+	mov.l		&0x3fff0000,-(%sp) # force exp to zero
+	fmovm.x		(%sp)+,&0x80	# load normalized DENORM into fp0
+	fmul.x		(%sp)+,%fp0
+
+#	fmul.x	36(%a1),%fp0	# multiply fp0 by 10^8
+#	fmul.x	48(%a1),%fp0	# multiply fp0 by 10^16
+	mov.l		36+8(%a1),-(%sp) # get 10^8 mantissa
+	mov.l		36+4(%a1),-(%sp)
+	mov.l		&0x3fff0000,-(%sp) # force exp to zero
+	mov.l		48+8(%a1),-(%sp) # get 10^16 mantissa
+	mov.l		48+4(%a1),-(%sp)
+	mov.l		&0x3fff0000,-(%sp) # force exp to zero
+	fmul.x		(%sp)+,%fp0	# multiply fp0 by 10^8
+	fmul.x		(%sp)+,%fp0	# multiply fp0 by 10^16
+	bra.b		A10_st
+
+sc_mul_err:
+	bra.b		sc_mul_err
+
+A9_norm:
+	tst.w		%d2		# test for small exp case
+	beq.b		A9_con		# if zero, continue as normal
+	fmul.x		36(%a1),%fp0	# multiply fp0 by 10^8
+	fmul.x		48(%a1),%fp0	# multiply fp0 by 10^16
+A9_con:
+	fmul.x		%fp1,%fp0	# calculate X * SCALE -> Y to fp0
+
+# A10. Or in INEX.
+#      If INEX is set, round error occurred.  This is compensated
+#      for by 'or-ing' in the INEX2 flag to the lsb of Y.
+#
+# Register usage:
+#	Input/Output
+#	d0: FPCR with RZ mode/FPSR with INEX2 isolated
+#	d2: x/x
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/final result
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: x/ptr to FP_SCR1(a6)
+#	fp0: Y/Y with lsb adjusted
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/x
+
+A10_st:
+	fmov.l		%fpsr,%d0	# get FPSR
+	fmov.x		%fp0,FP_SCR1(%a6)	# move Y to memory
+	lea.l		FP_SCR1(%a6),%a2	# load a2 with ptr to FP_SCR1
+	btst		&9,%d0		# check if INEX2 set
+	beq.b		A11_st		# if clear, skip rest
+	or.l		&1,8(%a2)	# or in 1 to lsb of mantissa
+	fmov.x		FP_SCR1(%a6),%fp0	# write adjusted Y back to fpu
+
+
+# A11. Restore original FPCR; set size ext.
+#      Perform FINT operation in the user's rounding mode.  Keep
+#      the size to extended.  The sintdo entry point in the sint
+#      routine expects the FPCR value to be in USER_FPCR for
+#      mode and precision.  The original FPCR is saved in L_SCR1.
+
+A11_st:
+	mov.l		USER_FPCR(%a6),L_SCR1(%a6)	# save it for later
+	and.l		&0x00000030,USER_FPCR(%a6)	# set size to ext,
+#					;block exceptions
+
+
+# A12. Calculate YINT = FINT(Y) according to user's rounding mode.
+#      The FPSP routine sintd0 is used.  The output is in fp0.
+#
+# Register usage:
+#	Input/Output
+#	d0: FPSR with AINEX cleared/FPCR with size set to ext
+#	d2: x/x/scratch
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/Unchanged
+#	d6: ILOG/Unchanged
+#	d7: k-factor/Unchanged
+#	a0: ptr for original operand/src ptr for sintdo
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	a6: temp pointer to FP_SCR1(a6) - orig value saved and restored
+#	fp0: Y/YINT
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/x
+#	F_SCR1:x/x
+#	F_SCR2:Y adjusted for inex/Y with original exponent
+#	L_SCR1:x/original USER_FPCR
+#	L_SCR2:first word of X packed/Unchanged
+
+A12_st:
+	movm.l	&0xc0c0,-(%sp)	# save regs used by sintd0	 {%d0-%d1/%a0-%a1}
+	mov.l	L_SCR1(%a6),-(%sp)
+	mov.l	L_SCR2(%a6),-(%sp)
+
+	lea.l		FP_SCR1(%a6),%a0	# a0 is ptr to FP_SCR1(a6)
+	fmov.x		%fp0,(%a0)	# move Y to memory at FP_SCR1(a6)
+	tst.l		L_SCR2(%a6)	# test sign of original operand
+	bge.b		do_fint12		# if pos, use Y
+	or.l		&0x80000000,(%a0)	# if neg, use -Y
+do_fint12:
+	mov.l	USER_FPSR(%a6),-(%sp)
+#	bsr	sintdo		# sint routine returns int in fp0
+
+	fmov.l	USER_FPCR(%a6),%fpcr
+	fmov.l	&0x0,%fpsr			# clear the AEXC bits!!!
+##	mov.l		USER_FPCR(%a6),%d0	# ext prec/keep rnd mode
+##	andi.l		&0x00000030,%d0
+##	fmov.l		%d0,%fpcr
+	fint.x		FP_SCR1(%a6),%fp0	# do fint()
+	fmov.l	%fpsr,%d0
+	or.w	%d0,FPSR_EXCEPT(%a6)
+##	fmov.l		&0x0,%fpcr
+##	fmov.l		%fpsr,%d0		# don't keep ccodes
+##	or.w		%d0,FPSR_EXCEPT(%a6)
+
+	mov.b	(%sp),USER_FPSR(%a6)
+	add.l	&4,%sp
+
+	mov.l	(%sp)+,L_SCR2(%a6)
+	mov.l	(%sp)+,L_SCR1(%a6)
+	movm.l	(%sp)+,&0x303	# restore regs used by sint	 {%d0-%d1/%a0-%a1}
+
+	mov.l	L_SCR2(%a6),FP_SCR1(%a6)	# restore original exponent
+	mov.l	L_SCR1(%a6),USER_FPCR(%a6)	# restore user's FPCR
+
+# A13. Check for LEN digits.
+#      If the int operation results in more than LEN digits,
+#      or less than LEN -1 digits, adjust ILOG and repeat from
+#      A6.  This test occurs only on the first pass.  If the
+#      result is exactly 10^LEN, decrement ILOG and divide
+#      the mantissa by 10.  The calculation of 10^LEN cannot
+#      be inexact, since all powers of ten up to 10^27 are exact
+#      in extended precision, so the use of a previous power-of-ten
+#      table will introduce no error.
+#
+#
+# Register usage:
+#	Input/Output
+#	d0: FPCR with size set to ext/scratch final = 0
+#	d2: x/x
+#	d3: x/scratch final = x
+#	d4: LEN/LEN adjusted
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG/ILOG adjusted
+#	d7: k-factor/Unchanged
+#	a0: pointer into memory for packed bcd string formation
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: int portion of Y/abs(YINT) adjusted
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: x/10^LEN
+#	F_SCR1:x/x
+#	F_SCR2:Y with original exponent/Unchanged
+#	L_SCR1:original USER_FPCR/Unchanged
+#	L_SCR2:first word of X packed/Unchanged
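+#
+# In C terms, A13 forces 10^(LEN-1) <= abs(yint) < 10^LEN (a sketch
+# reusing the pow10i() idea from decbin; "repeat" means branch back
+# to A6 with the new ILOG, on the first pass only):
+#
+#	double p = pow10i(len - 1);
+#	if (fabs(yint) < p) {
+#	    ilog--;                  /* too few digits: repeat */
+#	} else if (fabs(yint) > 10.0*p) {
+#	    ilog++;                  /* too many digits: repeat */
+#	} else if (fabs(yint) == 10.0*p) {
+#	    yint /= 10.0;            /* exactly 10^LEN */
+#	    ilog++;
+#	}
+#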
+
+A13_st:
+	swap		%d5		# put ICTR in lower word of d5
+	tst.w		%d5		# check if ICTR = 0
+	bne		not_zr		# if non-zero, go to second test
+#
+# Compute 10^(LEN-1)
+#
+	fmov.s		FONE(%pc),%fp2	# init fp2 to 1.0
+	mov.l		%d4,%d0		# put LEN in d0
+	subq.l		&1,%d0		# d0 = LEN -1
+	clr.l		%d3		# clr table index
+l_loop:
+	lsr.l		&1,%d0		# shift next bit into carry
+	bcc.b		l_next		# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp2	# mul by 10**(d3_bit_no)
+l_next:
+	add.l		&12,%d3		# inc d3 to next pwrten table entry
+	tst.l		%d0		# test if LEN is zero
+	bne.b		l_loop		# if not, loop
+#
+# 10^(LEN-1) is computed for this test and A14.  If the input was
+# denormalized, check only the case in which YINT > 10^LEN.
+#
+	tst.b		BINDEC_FLG(%a6)	# check if input was norm
+	beq.b		A13_con		# if norm, continue with checking
+	fabs.x		%fp0		# take abs of YINT
+	bra		test_2
+#
+# Compare abs(YINT) to 10^(LEN-1) and 10^LEN
+#
+A13_con:
+	fabs.x		%fp0		# take abs of YINT
+	fcmp.x		%fp0,%fp2	# compare abs(YINT) with 10^(LEN-1)
+	fbge.w		test_2		# if greater, do next test
+	subq.l		&1,%d6		# subtract 1 from ILOG
+	mov.w		&1,%d5		# set ICTR
+	fmov.l		&rm_mode*0x10,%fpcr	# set rmode to RM
+	fmul.s		FTEN(%pc),%fp2	# compute 10^LEN
+	bra.w		A6_str		# return to A6 and recompute YINT
+test_2:
+	fmul.s		FTEN(%pc),%fp2	# compute 10^LEN
+	fcmp.x		%fp0,%fp2	# compare abs(YINT) with 10^LEN
+	fblt.w		A14_st		# if less, all is ok, go to A14
+	fbgt.w		fix_ex		# if greater, fix and redo
+	fdiv.s		FTEN(%pc),%fp0	# if equal, divide by 10
+	addq.l		&1,%d6		# and inc ILOG
+	bra.b		A14_st		# and continue elsewhere
+fix_ex:
+	addq.l		&1,%d6		# increment ILOG by 1
+	mov.w		&1,%d5		# set ICTR
+	fmov.l		&rm_mode*0x10,%fpcr	# set rmode to RM
+	bra.w		A6_str		# return to A6 and recompute YINT
+#
+# Since ICTR <> 0, we have already been through one adjustment,
+# and shouldn't have another; this is to check if abs(YINT) = 10^LEN
+# 10^LEN is again computed using whatever table is in a1 since the
+# value calculated cannot be inexact.
+#
+not_zr:
+	fmov.s		FONE(%pc),%fp2	# init fp2 to 1.0
+	mov.l		%d4,%d0		# put LEN in d0
+	clr.l		%d3		# clr table index
+z_loop:
+	lsr.l		&1,%d0		# shift next bit into carry
+	bcc.b		z_next		# if zero, skip the mul
+	fmul.x		(%a1,%d3),%fp2	# mul by 10**(d3_bit_no)
+z_next:
+	add.l		&12,%d3		# inc d3 to next pwrten table entry
+	tst.l		%d0		# test if LEN is zero
+	bne.b		z_loop		# if not, loop
+	fabs.x		%fp0		# get abs(YINT)
+	fcmp.x		%fp0,%fp2	# check if abs(YINT) = 10^LEN
+	fbneq.w		A14_st		# if not, skip this
+	fdiv.s		FTEN(%pc),%fp0	# divide abs(YINT) by 10
+	addq.l		&1,%d6		# and inc ILOG by 1
+	addq.l		&1,%d4		# and inc LEN
+	fmul.s		FTEN(%pc),%fp2	# LEN was inc'd, so get 10^LEN
+
+# A14. Convert the mantissa to bcd.
+#      The binstr routine is used to convert the LEN digit
+#      mantissa to bcd in memory.  The input to binstr is
+#      to be a fraction; i.e. (mantissa)/10^LEN and adjusted
+#      such that the decimal point is to the left of bit 63.
+#      The bcd digits are stored in the correct position in
+#      the final string area in memory.
+#
+#
+# Register usage:
+#	Input/Output
+#	d0: x/LEN call to binstr - final is 0
+#	d1: x/0
+#	d2: x/ms 32-bits of mant of abs(YINT)
+#	d3: x/ls 32-bits of mant of abs(YINT)
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG
+#	d7: k-factor/Unchanged
+#	a0: pointer into memory for packed bcd string formation
+#	    /ptr to first mantissa byte in result string
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: int portion of Y/abs(YINT) adjusted
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: 10^LEN/Unchanged
+#	F_SCR1:x/Work area for final result
+#	F_SCR2:Y with original exponent/Unchanged
+#	L_SCR1:original USER_FPCR/Unchanged
+#	L_SCR2:first word of X packed/Unchanged
+
+A14_st:
+	fmov.l		&rz_mode*0x10,%fpcr	# force rz for conversion
+	fdiv.x		%fp2,%fp0	# divide abs(YINT) by 10^LEN
+	lea.l		FP_SCR0(%a6),%a0
+	fmov.x		%fp0,(%a0)	# move abs(YINT)/10^LEN to memory
+	mov.l		4(%a0),%d2	# move 2nd word of FP_RES to d2
+	mov.l		8(%a0),%d3	# move 3rd word of FP_RES to d3
+	clr.l		4(%a0)		# zero word 2 of FP_RES
+	clr.l		8(%a0)		# zero word 3 of FP_RES
+	mov.l		(%a0),%d0	# move exponent to d0
+	swap		%d0		# put exponent in lower word
+	beq.b		no_sft		# if zero, don't shift
+	sub.l		&0x3ffd,%d0	# sub bias less 2 to make fract
+	tst.l		%d0		# check if > 1
+	bgt.b		no_sft		# if so, don't shift
+	neg.l		%d0		# make exp positive
+m_loop:
+	lsr.l		&1,%d2		# shift d2:d3 right, add 0s
+	roxr.l		&1,%d3		# the number of places
+	dbf.w		%d0,m_loop	# given in d0
+no_sft:
+	tst.l		%d2		# check for mantissa of zero
+	bne.b		no_zr		# if not, go on
+	tst.l		%d3		# continue zero check
+	beq.b		zer_m		# if zero, go directly to binstr
+no_zr:
+	clr.l		%d1		# put zero in d1 for addx
+	add.l		&0x00000080,%d3	# inc at bit 7
+	addx.l		%d1,%d2		# continue inc
+	and.l		&0xffffff80,%d3	# strip off lsb not used by 882
+zer_m:
+	mov.l		%d4,%d0		# put LEN in d0 for binstr call
+	addq.l		&3,%a0		# a0 points to M16 byte in result
+	bsr		binstr		# call binstr to convert mant
+
+
+# A15. Convert the exponent to bcd.
+#      As in A14 above, the exp is converted to bcd and the
+#      digits are stored in the final string.
+#
+#      Digits are stored in L_SCR1(a6) on return from binstr as:
+#
+#	 32               16 15                0
+#	-----------------------------------------
+#	|  0 | e3 | e2 | e1 | e4 |  X |  X |  X |
+#	-----------------------------------------
+#
+# And are moved into their proper places in FP_SCR0.  If digit e4
+# is non-zero, OPERR is signaled.  In all cases, all 4 digits are
+# written as specified in the 881/882 manual for packed decimal.
+#
+# Register usage:
+#	Input/Output
+#	d0: x/LEN call to binstr - final is 0
+#	d1: x/scratch (0);shift count for final exponent packing
+#	d2: x/ms 32-bits of exp fraction/scratch
+#	d3: x/ls 32-bits of exp fraction
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG
+#	d7: k-factor/Unchanged
+#	a0: ptr to result string/ptr to L_SCR1(a6)
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: abs(YINT) adjusted/float(ILOG)
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: 10^LEN/Unchanged
+#	F_SCR1:Work area for final result/BCD result
+#	F_SCR2:Y with original exponent/ILOG/10^4
+#	L_SCR1:original USER_FPCR/Exponent digits on return from binstr
+#	L_SCR2:first word of X packed/Unchanged
+
+A15_st:
+	tst.b		BINDEC_FLG(%a6)	# check for denorm
+	beq.b		not_denorm
+	ftest.x		%fp0		# test for zero
+	fbeq.w		den_zero	# if zero, use k-factor or 4933
+	fmov.l		%d6,%fp0	# float ILOG
+	fabs.x		%fp0		# get abs of ILOG
+	bra.b		convrt
+den_zero:
+	tst.l		%d7		# check sign of the k-factor
+	blt.b		use_ilog	# if negative, use ILOG
+	fmov.s		F4933(%pc),%fp0	# force exponent to 4933
+	bra.b		convrt		# do it
+use_ilog:
+	fmov.l		%d6,%fp0	# float ILOG
+	fabs.x		%fp0		# get abs of ILOG
+	bra.b		convrt
+not_denorm:
+	ftest.x		%fp0		# test for zero
+	fbneq.w		not_zero	# if zero, force exponent
+	fmov.s		FONE(%pc),%fp0	# force exponent to 1
+	bra.b		convrt		# do it
+not_zero:
+	fmov.l		%d6,%fp0	# float ILOG
+	fabs.x		%fp0		# get abs of ILOG
+convrt:
+	fdiv.x		24(%a1),%fp0	# compute ILOG/10^4
+	fmov.x		%fp0,FP_SCR1(%a6)	# store fp0 in memory
+	mov.l		4(%a2),%d2	# move word 2 to d2
+	mov.l		8(%a2),%d3	# move word 3 to d3
+	mov.w		(%a2),%d0	# move exp to d0
+	beq.b		x_loop_fin	# if zero, skip the shift
+	sub.w		&0x3ffd,%d0	# subtract off bias
+	neg.w		%d0		# make exp positive
+x_loop:
+	lsr.l		&1,%d2		# shift d2:d3 right
+	roxr.l		&1,%d3		# the number of places
+	dbf.w		%d0,x_loop	# given in d0
+x_loop_fin:
+	clr.l		%d1		# put zero in d1 for addx
+	add.l		&0x00000080,%d3	# inc at bit 7
+	addx.l		%d1,%d2		# continue inc
+	and.l		&0xffffff80,%d3	# strip off lsb not used by 882
+	mov.l		&4,%d0		# put 4 in d0 for binstr call
+	lea.l		L_SCR1(%a6),%a0	# a0 is ptr to L_SCR1 for exp digits
+	bsr		binstr		# call binstr to convert exp
+	mov.l		L_SCR1(%a6),%d0	# load L_SCR1 lword to d0
+	mov.l		&12,%d1		# use d1 for shift count
+	lsr.l		%d1,%d0		# shift d0 right by 12
+	bfins		%d0,FP_SCR0(%a6){&4:&12}	# put e3:e2:e1 in FP_SCR0
+	lsr.l		%d1,%d0		# shift d0 right by 12
+	bfins		%d0,FP_SCR0(%a6){&16:&4}	# put e4 in FP_SCR0
+	tst.b		%d0		# check if e4 is zero
+	beq.b		A16_st		# if zero, skip rest
+	or.l		&opaop_mask,USER_FPSR(%a6)	# set OPERR & AIOP in USER_FPSR
+
+
+# A16. Write sign bits to final string.
+#	   Sigma is bit 31 of initial value; RHO is bit 31 of d6 (ILOG).
+#
+# Register usage:
+#	Input/Output
+#	d0: x/scratch - final is x
+#	d2: x/x
+#	d3: x/x
+#	d4: LEN/Unchanged
+#	d5: ICTR:LAMBDA/LAMBDA:ICTR
+#	d6: ILOG/ILOG adjusted
+#	d7: k-factor/Unchanged
+#	a0: ptr to L_SCR1(a6)/Unchanged
+#	a1: ptr to PTENxx array/Unchanged
+#	a2: ptr to FP_SCR1(a6)/Unchanged
+#	fp0: float(ILOG)/Unchanged
+#	fp1: 10^ISCALE/Unchanged
+#	fp2: 10^LEN/Unchanged
+#	F_SCR1:BCD result with correct signs
+#	F_SCR2:ILOG/10^4
+#	L_SCR1:Exponent digits on return from binstr
+#	L_SCR2:first word of X packed/Unchanged
+
+A16_st:
+	clr.l		%d0		# clr d0 for collection of signs
+	and.b		&0x0f,FP_SCR0(%a6)	# clear first nibble of FP_SCR0
+	tst.l		L_SCR2(%a6)	# check sign of original mantissa
+	bge.b		mant_p		# if pos, don't set SM
+	mov.l		&2,%d0		# move 2 in to d0 for SM
+mant_p:
+	tst.l		%d6		# check sign of ILOG
+	bge.b		wr_sgn		# if pos, don't set SE
+	addq.l		&1,%d0		# set bit 0 in d0 for SE
+wr_sgn:
+	bfins		%d0,FP_SCR0(%a6){&0:&2}	# insert SM and SE into FP_SCR0
+
+# Clean up and restore all registers used.
+
+	fmov.l		&0,%fpsr	# clear possible inex2/ainex bits
+	fmovm.x		(%sp)+,&0xe0	#  {%fp0-%fp2}
+	movm.l		(%sp)+,&0x4fc	#  {%d2-%d7/%a2}
+	rts
+
+	global		PTENRN
+PTENRN:
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
+
+	global		PTENRP
+PTENRP:
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59E	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D6	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CE0	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8E	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C7	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C18	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE5	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979B	# 10 ^ 4096
+
+	global		PTENRM
+PTENRM:
+	long		0x40020000,0xA0000000,0x00000000	# 10 ^ 1
+	long		0x40050000,0xC8000000,0x00000000	# 10 ^ 2
+	long		0x400C0000,0x9C400000,0x00000000	# 10 ^ 4
+	long		0x40190000,0xBEBC2000,0x00000000	# 10 ^ 8
+	long		0x40340000,0x8E1BC9BF,0x04000000	# 10 ^ 16
+	long		0x40690000,0x9DC5ADA8,0x2B70B59D	# 10 ^ 32
+	long		0x40D30000,0xC2781F49,0xFFCFA6D5	# 10 ^ 64
+	long		0x41A80000,0x93BA47C9,0x80E98CDF	# 10 ^ 128
+	long		0x43510000,0xAA7EEBFB,0x9DF9DE8D	# 10 ^ 256
+	long		0x46A30000,0xE319A0AE,0xA60E91C6	# 10 ^ 512
+	long		0x4D480000,0xC9767586,0x81750C17	# 10 ^ 1024
+	long		0x5A920000,0x9E8B3B5D,0xC53D5DE4	# 10 ^ 2048
+	long		0x75250000,0xC4605202,0x8A20979A	# 10 ^ 4096
+
+#########################################################################
+# binstr(): Converts a 64-bit binary integer to bcd.			#
+#									#
+# INPUT *************************************************************** #
+#	d2:d3 = 64-bit binary integer					#
+#	d0    = desired length (LEN)					#
+#	a0    = pointer to start in memory for bcd characters		#
+#		(This pointer must point to byte 4 of the first		#
+#		 lword of the packed decimal memory string.)		#
+#									#
+# OUTPUT ************************************************************** #
+#	a0 = pointer to LEN bcd digits representing the 64-bit integer.	#
+#									#
+# ALGORITHM ***********************************************************	#
+#	The 64-bit binary is assumed to have a decimal point before	#
+#	bit 63.  The fraction is multiplied by 10 using a mul by 2	#
+#	shift and a mul by 8 shift.  The bits shifted out of the	#
+#	msb form a decimal digit.  This process is iterated until	#
+#	LEN digits are formed.						#
+#									#
+# A1. Init d7 to 1.  D7 is the byte digit counter, and if 1, the	#
+#     digit formed will be assumed the least significant.  This is	#
+#     to force the first byte formed to have a 0 in the upper 4 bits.	#
+#									#
+# A2. Beginning of the loop:						#
+#     Copy the fraction in d2:d3 to d4:d5.				#
+#									#
+# A3. Multiply the fraction in d2:d3 by 8 using bit-field		#
+#     extracts and shifts.  The three msbs from d2 will go into d1.	#
+#									#
+# A4. Multiply the fraction in d4:d5 by 2 using shifts.  The msb	#
+#     will be collected by the carry.					#
+#									#
+# A5. Add using the carry the 64-bit quantities in d2:d3 and d4:d5	#
+#     into d2:d3.  D1 will contain the bcd digit formed.		#
+#									#
+# A6. Test d7.  If zero, the digit formed is the ms digit.  If non-	#
+#     zero, it is the ls digit.  Put the digit in its place in the	#
+#     upper word of d0.  If it is the ls digit, write the word		#
+#     from d0 to memory.						#
+#									#
+# A7. Decrement d0 (LEN counter) and repeat the loop until zero.	#
+#									#
+#########################################################################
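+
+# The multiply-by-ten step, sketched in C on a 64-bit fraction (the
+# binary point sits above bit 63; the bits pushed out the top are the
+# next decimal digit, exactly the d2:d3/d4:d5 dance below):
+#
+#	#include <stdint.h>
+#
+#	unsigned int next_digit(uint64_t *frac)
+#	{
+#	    uint64_t x = *frac, x8, x2;
+#	    unsigned int d = (unsigned int)(x >> 61);  /* carry-out of x*8 */
+#
+#	    x8 = x << 3;
+#	    d += (unsigned int)(x >> 63);              /* carry-out of x*2 */
+#	    x2 = x << 1;
+#	    *frac = x8 + x2;
+#	    d += (*frac < x8);                         /* carry of the add */
+#	    return d;                                  /* 0..9 */
+#	}
+#
+# calling next_digit() LEN times and packing two digits per byte
+# reproduces the string binstr builds at (a0).
+#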
+
+#	Implementation Notes:
+#
+#	The registers are used as follows:
+#
+#		d0: LEN counter
+#		d1: temp used to form the digit
+#		d2: upper 32-bits of fraction for mul by 8
+#		d3: lower 32-bits of fraction for mul by 8
+#		d4: upper 32-bits of fraction for mul by 2
+#		d5: lower 32-bits of fraction for mul by 2
+#		d6: temp for bit-field extracts
+#		d7: byte digit formation word;digit count {0,1}
+#		a0: pointer into memory for packed bcd string formation
+#
+
+	global		binstr
+binstr:
+	movm.l		&0xff00,-(%sp)	#  {%d0-%d7}
+
+#
+# A1: Init d7
+#
+	mov.l		&1,%d7		# init d7 for second digit
+	subq.l		&1,%d0		# for dbf d0 would have LEN+1 passes
+#
+# A2. Copy d2:d3 to d4:d5.  Start loop.
+#
+loop:
+	mov.l		%d2,%d4		# copy the fraction before muls
+	mov.l		%d3,%d5		# to d4:d5
+#
+# A3. Multiply d2:d3 by 8; extract msbs into d1.
+#
+	bfextu		%d2{&0:&3},%d1	# copy 3 msbs of d2 into d1
+	asl.l		&3,%d2		# shift d2 left by 3 places
+	bfextu		%d3{&0:&3},%d6	# copy 3 msbs of d3 into d6
+	asl.l		&3,%d3		# shift d3 left by 3 places
+	or.l		%d6,%d2		# or in msbs from d3 into d2
+#
+# A4. Multiply d4:d5 by 2; add carry out to d1.
+#
+	asl.l		&1,%d5		# mul d5 by 2
+	roxl.l		&1,%d4		# mul d4 by 2
+	swap		%d6		# put 0 in d6 lower word
+	addx.w		%d6,%d1		# add in extend from mul by 2
+#
+# A5. Add mul by 8 to mul by 2.  D1 contains the digit formed.
+#
+	add.l		%d5,%d3		# add lower 32 bits
+	nop				# ERRATA FIX #13 (Rev. 1.2 6/6/90)
+	addx.l		%d4,%d2		# add with extend upper 32 bits
+	nop				# ERRATA FIX #13 (Rev. 1.2 6/6/90)
+	addx.w		%d6,%d1		# add in extend from add to d1
+	swap		%d6		# with d6 = 0; put 0 in upper word
+#
+# A6. Test d7 and branch.
+#
+	tst.w		%d7		# is this the ls digit of a byte?
+	beq.b		first_d		# no: save it as the ms digit
+sec_d:
+	swap		%d7		# bring first digit to word d7b
+	asl.w		&4,%d7		# first digit in upper 4 bits d7b
+	add.w		%d1,%d7		# add in ls digit to d7b
+	mov.b		%d7,(%a0)+	# store d7b byte in memory
+	swap		%d7		# put LEN counter in word d7a
+	clr.w		%d7		# set d7a to signal no digits done
+	dbf.w		%d0,loop	# do loop some more!
+	bra.b		end_bstr	# finished, so exit
+first_d:
+	swap		%d7		# put digit word in d7b
+	mov.w		%d1,%d7		# put new digit in d7b
+	swap		%d7		# put LEN counter in word d7a
+	addq.w		&1,%d7		# set d7a to signal first digit done
+	dbf.w		%d0,loop	# do loop some more!
+	swap		%d7		# put last digit in string
+	lsl.w		&4,%d7		# move it to upper 4 bits
+	mov.b		%d7,(%a0)+	# store it in memory string
+#
+# Clean up and return with the bcd string at (a0).
+#
+end_bstr:
+	movm.l		(%sp)+,&0xff	#  {%d0-%d7}
+	rts
+
+#########################################################################
+# XDEF ****************************************************************	#
+#	facc_in_b(): dmem_read_byte failed				#
+#	facc_in_w(): dmem_read_word failed				#
+#	facc_in_l(): dmem_read_long failed				#
+#	facc_in_d(): dmem_read of dbl prec failed			#
+#	facc_in_x(): dmem_read of ext prec failed			#
+#									#
+#	facc_out_b(): dmem_write_byte failed				#
+#	facc_out_w(): dmem_write_word failed				#
+#	facc_out_l(): dmem_write_long failed				#
+#	facc_out_d(): dmem_write of dbl prec failed			#
+#	facc_out_x(): dmem_write of ext prec failed			#
+#									#
+# XREF ****************************************************************	#
+#	_real_access() - exit through access error handler		#
+#									#
+# INPUT ***************************************************************	#
+#	None								#
+#									#
+# OUTPUT **************************************************************	#
+#	None								#
+#									#
+# ALGORITHM ***********************************************************	#
+#	Flow jumps here when an FP data fetch call gets an error	#
+# result. This means the operating system wants an access error frame	#
+# made out of the current exception stack frame.			#
+#	So, we first call restore() which makes sure that any updated	#
+# -(an)+ register gets returned to its pre-exception value and then	#
+# we change the stack to an access error stack frame.			#
+#									#
+#########################################################################
+
+facc_in_b:
+	movq.l		&0x1,%d0			# one byte
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0121,EXC_VOFF(%a6)		# set FSLW
+	bra.w		facc_finish
+
+facc_in_w:
+	movq.l		&0x2,%d0			# two bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0141,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_in_l:
+	movq.l		&0x4,%d0			# four bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0101,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_in_d:
+	movq.l		&0x8,%d0			# eight bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0161,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_in_x:
+	movq.l		&0xc,%d0			# twelve bytes
+	bsr.w		restore				# fix An
+
+	mov.w		&0x0161,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+################################################################
+
+facc_out_b:
+	movq.l		&0x1,%d0			# one byte
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00a1,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_w:
+	movq.l		&0x2,%d0			# two bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00c1,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_l:
+	movq.l		&0x4,%d0			# four bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x0081,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_d:
+	movq.l		&0x8,%d0			# eight bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00e1,EXC_VOFF(%a6)		# set FSLW
+	bra.b		facc_finish
+
+facc_out_x:
+	mov.l		&0xc,%d0			# twelve bytes
+	bsr.w		restore				# restore An
+
+	mov.w		&0x00e1,EXC_VOFF(%a6)		# set FSLW
+
+# here's where we actually create the access error frame from the
+# current exception stack frame.
+facc_finish:
+	mov.l		USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
+
+	fmovm.x		EXC_FPREGS(%a6),&0xc0	# restore fp0-fp1
+	fmovm.l		USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+	movm.l		EXC_DREGS(%a6),&0x0303	# restore d0-d1/a0-a1
+
+	unlk		%a6
+
+	mov.l		(%sp),-(%sp)		# store SR, hi(PC)
+	mov.l		0x8(%sp),0x4(%sp)	# store lo(PC)
+	mov.l		0xc(%sp),0x8(%sp)	# store EA
+	mov.l		&0x00000001,0xc(%sp)	# store FSLW
+	mov.w		0x6(%sp),0xc(%sp)	# fix FSLW (size)
+	mov.w		&0x4008,0x6(%sp)	# store voff
+
+	btst		&0x5,(%sp)		# supervisor or user mode?
+	beq.b		facc_out2		# user
+	bset		&0x2,0xd(%sp)		# set supervisor TM bit
+
+facc_out2:
+	bra.l		_real_access
+
+##################################################################
+
+# if the effective addressing mode was predecrement or postincrement,
+# the emulation has already changed its value to the correct post-
+# instruction value. but since we're exiting to the access error
+# handler, then AN must be returned to its pre-instruction value.
+# we do that here.
+restore:
+	mov.b		EXC_OPWORD+0x1(%a6),%d1
+	andi.b		&0x38,%d1		# extract opmode
+	cmpi.b		%d1,&0x18		# postinc?
+	beq.w		rest_inc
+	cmpi.b		%d1,&0x20		# predec?
+	beq.w		rest_dec
+	rts
+
+rest_inc:
+	mov.b		EXC_OPWORD+0x1(%a6),%d1
+	andi.w		&0x0007,%d1		# fetch An
+
+	mov.w		(tbl_rest_inc.b,%pc,%d1.w*2),%d1
+	jmp		(tbl_rest_inc.b,%pc,%d1.w*1)
+
+tbl_rest_inc:
+	short		ri_a0 - tbl_rest_inc
+	short		ri_a1 - tbl_rest_inc
+	short		ri_a2 - tbl_rest_inc
+	short		ri_a3 - tbl_rest_inc
+	short		ri_a4 - tbl_rest_inc
+	short		ri_a5 - tbl_rest_inc
+	short		ri_a6 - tbl_rest_inc
+	short		ri_a7 - tbl_rest_inc
+
+ri_a0:
+	sub.l		%d0,EXC_DREGS+0x8(%a6)	# fix stacked a0
+	rts
+ri_a1:
+	sub.l		%d0,EXC_DREGS+0xc(%a6)	# fix stacked a1
+	rts
+ri_a2:
+	sub.l		%d0,%a2			# fix a2
+	rts
+ri_a3:
+	sub.l		%d0,%a3			# fix a3
+	rts
+ri_a4:
+	sub.l		%d0,%a4			# fix a4
+	rts
+ri_a5:
+	sub.l		%d0,%a5			# fix a5
+	rts
+ri_a6:
+	sub.l		%d0,(%a6)		# fix stacked a6
+	rts
+# if it's a fmove out instruction, we don't have to fix a7
+# because we hadn't changed it yet. if it's an opclass two
+# instruction (data moved in) and the exception was in supervisor
+# mode, then a7 also wasn't updated. if it was user mode, then
+# restore the correct a7, which is currently in the USP.
+ri_a7:
+	cmpi.b		EXC_VOFF(%a6),&0x30	# move in or out?
+	bne.b		ri_a7_done		# out
+
+	btst		&0x5,EXC_SR(%a6)	# user or supervisor?
+	bne.b		ri_a7_done		# supervisor
+	movc		%usp,%a0		# restore USP
+	sub.l		%d0,%a0
+	movc		%a0,%usp
+ri_a7_done:
+	rts
+
+# need to invert adjustment value if the <ea> was predec
+rest_dec:
+	neg.l		%d0
+	bra.b		rest_inc