[SCSI] bfa: cleanup driver

Flatten the BFA source hierarchy and reduce the number of source and
header files the driver carried. As part of this restructuring, the IOC
state machine is split into separate IOC and IOCPF (per physical
function) state machines, and the driver's private AEN/log modules are
replaced with standard kernel logging.

Signed-off-by: Krishna Gudipati <kgudipat@brocade.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 8e78f20..6795b24 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -15,35 +15,33 @@
  * General Public License for more details.
  */
 
-#include <bfa.h>
-#include <bfa_ioc.h>
-#include <bfa_fwimg_priv.h>
-#include <cna/bfa_cna_trcmod.h>
-#include <cs/bfa_debug.h>
-#include <bfi/bfi_ioc.h>
-#include <bfi/bfi_ctreg.h>
-#include <aen/bfa_aen_ioc.h>
-#include <aen/bfa_aen.h>
-#include <log/bfa_log_hal.h>
-#include <defs/bfa_defs_pci.h>
+#include "bfa_ioc.h"
+#include "bfi_ctreg.h"
+#include "bfa_defs.h"
+#include "bfa_defs_svc.h"
+#include "bfad_drv.h"
 
 BFA_TRC_FILE(CNA, IOC);
 
 /**
  * IOC local definitions
  */
-#define BFA_IOC_TOV		2000	/* msecs */
-#define BFA_IOC_HWSEM_TOV       500     /* msecs */
-#define BFA_IOC_HB_TOV          500     /* msecs */
-#define BFA_IOC_HWINIT_MAX      2
-#define BFA_IOC_FWIMG_MINSZ     (16 * 1024)
-#define BFA_IOC_TOV_RECOVER      BFA_IOC_HB_TOV
+#define BFA_IOC_TOV		3000	/* msecs */
+#define BFA_IOC_HWSEM_TOV	500	/* msecs */
+#define BFA_IOC_HB_TOV		500	/* msecs */
+#define BFA_IOC_HWINIT_MAX	2
+#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
 
 #define bfa_ioc_timer_start(__ioc)					\
 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
 			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
 #define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)
 
+#define bfa_hb_timer_start(__ioc)					\
+	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
+			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
+#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)
+
 #define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
 #define BFA_DBG_FWTRC_LEN					\
 	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
@@ -55,100 +53,226 @@
  * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
  */
 
-#define bfa_ioc_firmware_lock(__ioc)                    \
+#define bfa_ioc_firmware_lock(__ioc)			\
 			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
-#define bfa_ioc_firmware_unlock(__ioc)                  \
+#define bfa_ioc_firmware_unlock(__ioc)			\
 			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
-#define bfa_ioc_notify_hbfail(__ioc)                    \
+#define bfa_ioc_notify_hbfail(__ioc)			\
 			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
-#define bfa_ioc_is_optrom(__ioc)        \
-	(bfi_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
 
-bfa_boolean_t   bfa_auto_recover = BFA_TRUE;
+#ifdef BFA_IOC_IS_UEFI
+#define bfa_ioc_is_bios_optrom(__ioc) (0)
+#define bfa_ioc_is_uefi(__ioc) BFA_IOC_IS_UEFI
+#else
+#define bfa_ioc_is_bios_optrom(__ioc)	\
+	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
+#define bfa_ioc_is_uefi(__ioc) (0)
+#endif
+
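+/*
+ * A mailbox command is pending if it is queued in s/w or posted in the
+ * h/w mailbox register.
+ */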
+#define bfa_ioc_mbox_cmd_pending(__ioc)		\
+			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
+			bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd))
+
+bfa_boolean_t bfa_auto_recover = BFA_TRUE;
 
 /*
  * forward declarations
  */
-static void     bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
-static void     bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
-static void     bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
-static void     bfa_ioc_timeout(void *ioc);
-static void     bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
-static void     bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
-static void     bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
-static void     bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
-static void     bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
-static void     bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
-static void     bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
-static void     bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
-static void     bfa_ioc_recover(struct bfa_ioc_s *ioc);
-static void	bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
-static void     bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
-static void     bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
+static void bfa_ioc_timeout(void *ioc);
+static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
+static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
+static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
+static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
+static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
+static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
+static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
+static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
+static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
+static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
+static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc);
+static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
+static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
+static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
 
 /**
- *  bfa_ioc_sm
+ *  hal_ioc_sm
  */
 
 /**
- * IOC state machine events
+ * IOC state machine definitions/declarations
  */
 enum ioc_event {
-	IOC_E_ENABLE = 1,	/*  IOC enable request */
-	IOC_E_DISABLE = 2,	/*  IOC disable request */
-	IOC_E_TIMEOUT = 3,	/*  f/w response timeout */
-	IOC_E_FWREADY = 4,	/*  f/w initialization done */
-	IOC_E_FWRSP_GETATTR = 5,	/*  IOC get attribute response */
-	IOC_E_FWRSP_ENABLE = 6,	/*  enable f/w response */
-	IOC_E_FWRSP_DISABLE = 7,	/*  disable f/w response */
-	IOC_E_HBFAIL = 8,	/*  heartbeat failure */
-	IOC_E_HWERROR = 9,	/*  hardware error interrupt */
-	IOC_E_SEMLOCKED = 10,	/*  h/w semaphore is locked */
-	IOC_E_DETACH = 11,	/*  driver detach cleanup */
+	IOC_E_RESET		= 1,	/*  IOC reset request		*/
+	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
+	IOC_E_DISABLE		= 3,	/*  IOC disable request	*/
+	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
+	IOC_E_ENABLED		= 5,	/*  f/w enabled		*/
+	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
+	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
+	IOC_E_FAILED		= 8,	/*  failure notice by iocpf sm	*/
+	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
+	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
+	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
 };
 
+bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
 
 static struct bfa_sm_table_s ioc_sm_table[] = {
+	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
 	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
-	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
-	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
-	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
-	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
-	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
+	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
 	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
 	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
 	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
-	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
+	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
 	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
 	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
 };
 
 /**
+ * IOCPF state machine definitions/declarations
+ */
+
+#define bfa_iocpf_timer_start(__ioc)					\
+	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
+			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
+#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)
+
+#define bfa_iocpf_recovery_timer_start(__ioc)				\
+	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
+			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)
+
+#define bfa_sem_timer_start(__ioc)					\
+	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
+			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
+#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
+
+/*
+ * Forward declarations for iocpf state machine
+ */
+static void bfa_iocpf_enable(struct bfa_ioc_s *ioc);
+static void bfa_iocpf_disable(struct bfa_ioc_s *ioc);
+static void bfa_iocpf_fail(struct bfa_ioc_s *ioc);
+static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc);
+static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc);
+static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
+static void bfa_iocpf_timeout(void *ioc_arg);
+static void bfa_iocpf_sem_timeout(void *ioc_arg);
+
+/**
+ * IOCPF state machine events
+ */
+enum iocpf_event {
+	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
+	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
+	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
+	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
+	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response	*/
+	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
+	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
+	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
+	IOCPF_E_GETATTRFAIL	= 9,	/*  getattr fail notice by ioc sm */
+	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
+	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
+};
+
+/**
+ * IOCPF states
+ */
+enum bfa_iocpf_state {
+	BFA_IOCPF_RESET		= 1,	/*  IOCPF is in reset state */
+	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
+	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
+	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized */
+	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF initialization failed */
+	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed */
+	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled */
+	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled */
+	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from driver's */
+};
+
+bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
+
+static struct bfa_sm_table_s iocpf_sm_table[] = {
+	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
+	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
+	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
+	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
+	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
+	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
+	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
+	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
+	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
+	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
+	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
+};
+
+/**
+ * IOC State Machine
+ */
+
+/**
+ * Beginning state. IOC uninit state.
+ */
+
+static void
+bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
+{
+}
+
+/**
+ * IOC is in uninit state.
+ */
+static void
+bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOC_E_RESET:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/**
  * Reset entry actions -- initialize state machine
  */
 static void
 bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
 {
-	ioc->retry_count = 0;
-	ioc->auto_recover = bfa_auto_recover;
+	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
 }
 
 /**
- * Beginning state. IOC is in reset state.
+ * IOC is in reset state.
  */
 static void
 bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
@@ -157,195 +281,15 @@
 
 	switch (event) {
 	case IOC_E_ENABLE:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
-		break;
-
-	case IOC_E_DISABLE:
-		bfa_ioc_disable_comp(ioc);
-		break;
-
-	case IOC_E_DETACH:
-		break;
-
-	default:
-		bfa_sm_fault(ioc, event);
-	}
-}
-
-/**
- * Semaphore should be acquired for version check.
- */
-static void
-bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
-{
-	bfa_ioc_hw_sem_get(ioc);
-}
-
-/**
- * Awaiting h/w semaphore to continue with version check.
- */
-static void
-bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
-{
-	bfa_trc(ioc, event);
-
-	switch (event) {
-	case IOC_E_SEMLOCKED:
-		if (bfa_ioc_firmware_lock(ioc)) {
-			ioc->retry_count = 0;
-			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
-		} else {
-			bfa_ioc_hw_sem_release(ioc);
-			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
-		}
-		break;
-
-	case IOC_E_DISABLE:
-		bfa_ioc_disable_comp(ioc);
-		/*
-		 * fall through
-		 */
-
-	case IOC_E_DETACH:
-		bfa_ioc_hw_sem_get_cancel(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
-		break;
-
-	case IOC_E_FWREADY:
-		break;
-
-	default:
-		bfa_sm_fault(ioc, event);
-	}
-}
-
-/**
- * Notify enable completion callback and generate mismatch AEN.
- */
-static void
-bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
-{
-	/**
-	 * Provide enable completion callback and AEN notification only once.
-	 */
-	if (ioc->retry_count == 0) {
-		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
-	}
-	ioc->retry_count++;
-	bfa_ioc_timer_start(ioc);
-}
-
-/**
- * Awaiting firmware version match.
- */
-static void
-bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
-{
-	bfa_trc(ioc, event);
-
-	switch (event) {
-	case IOC_E_TIMEOUT:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
-		break;
-
-	case IOC_E_DISABLE:
-		bfa_ioc_disable_comp(ioc);
-		/*
-		 * fall through
-		 */
-
-	case IOC_E_DETACH:
-		bfa_ioc_timer_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
-		break;
-
-	case IOC_E_FWREADY:
-		break;
-
-	default:
-		bfa_sm_fault(ioc, event);
-	}
-}
-
-/**
- * Request for semaphore.
- */
-static void
-bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
-{
-	bfa_ioc_hw_sem_get(ioc);
-}
-
-/**
- * Awaiting semaphore for h/w initialzation.
- */
-static void
-bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
-{
-	bfa_trc(ioc, event);
-
-	switch (event) {
-	case IOC_E_SEMLOCKED:
-		ioc->retry_count = 0;
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
-		break;
-
-	case IOC_E_DISABLE:
-		bfa_ioc_hw_sem_get_cancel(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
-		break;
-
-	default:
-		bfa_sm_fault(ioc, event);
-	}
-}
-
-
-static void
-bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
-{
-	bfa_ioc_timer_start(ioc);
-	bfa_ioc_reset(ioc, BFA_FALSE);
-}
-
-/**
- * Hardware is being initialized. Interrupts are enabled.
- * Holding hardware semaphore lock.
- */
-static void
-bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
-{
-	bfa_trc(ioc, event);
-
-	switch (event) {
-	case IOC_E_FWREADY:
-		bfa_ioc_timer_stop(ioc);
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
 		break;
 
-	case IOC_E_HWERROR:
-		bfa_ioc_timer_stop(ioc);
-		/*
-		 * fall through
-		 */
-
-	case IOC_E_TIMEOUT:
-		ioc->retry_count++;
-		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
-			bfa_ioc_timer_start(ioc);
-			bfa_ioc_reset(ioc, BFA_TRUE);
-			break;
-		}
-
-		bfa_ioc_hw_sem_release(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+	case IOC_E_DISABLE:
+		bfa_ioc_disable_comp(ioc);
 		break;
 
-	case IOC_E_DISABLE:
-		bfa_ioc_hw_sem_release(ioc);
-		bfa_ioc_timer_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 		break;
 
 	default:
@@ -357,8 +301,7 @@
 static void
 bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
 {
-	bfa_ioc_timer_start(ioc);
-	bfa_ioc_send_enable(ioc);
+	bfa_iocpf_enable(ioc);
 }
 
 /**
@@ -371,39 +314,29 @@
 	bfa_trc(ioc, event);
 
 	switch (event) {
-	case IOC_E_FWRSP_ENABLE:
-		bfa_ioc_timer_stop(ioc);
-		bfa_ioc_hw_sem_release(ioc);
+	case IOC_E_ENABLED:
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
 		break;
 
-	case IOC_E_HWERROR:
-		bfa_ioc_timer_stop(ioc);
-		/*
-		 * fall through
-		 */
-
-	case IOC_E_TIMEOUT:
-		ioc->retry_count++;
-		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
-			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
-				      BFI_IOC_UNINIT);
-			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
-			break;
-		}
-
-		bfa_ioc_hw_sem_release(ioc);
+	case IOC_E_FAILED:
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
 		break;
 
-	case IOC_E_DISABLE:
-		bfa_ioc_timer_stop(ioc);
-		bfa_ioc_hw_sem_release(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+	case IOC_E_HWERROR:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+		bfa_iocpf_initfail(ioc);
 		break;
 
-	case IOC_E_FWREADY:
-		bfa_ioc_send_enable(ioc);
+	case IOC_E_DISABLE:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+		break;
+
+	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		bfa_iocpf_stop(ioc);
+		break;
+
+	case IOC_E_ENABLE:
 		break;
 
 	default:
@@ -434,19 +367,26 @@
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
 		break;
 
+	case IOC_E_FAILED:
+		bfa_ioc_timer_stop(ioc);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+		break;
+
 	case IOC_E_HWERROR:
 		bfa_ioc_timer_stop(ioc);
-		/*
-		 * fall through
-		 */
+		/* fall through */
 
 	case IOC_E_TIMEOUT:
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
+		bfa_iocpf_getattrfail(ioc);
 		break;
 
 	case IOC_E_DISABLE:
 		bfa_ioc_timer_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+		break;
+
+	case IOC_E_ENABLE:
 		break;
 
 	default:
@@ -458,9 +398,11 @@
 static void
 bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
 {
+	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+
 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
 	bfa_ioc_hb_monitor(ioc);
-	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
+	BFA_LOG(KERN_INFO, bfad, log_level, "IOC enabled\n");
 }
 
 static void
@@ -477,19 +419,18 @@
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 		break;
 
-	case IOC_E_HWERROR:
-	case IOC_E_FWREADY:
-		/**
-		 * Hard error or IOC recovery by other function.
-		 * Treat it same as heartbeat failure.
-		 */
+	case IOC_E_FAILED:
 		bfa_ioc_hb_stop(ioc);
-		/*
-		 * !!! fall through !!!
-		 */
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+		break;
+
+	case IOC_E_HWERROR:
+		bfa_ioc_hb_stop(ioc);
+		/* !!! fall through !!! */
 
 	case IOC_E_HBFAIL:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+		bfa_iocpf_fail(ioc);
 		break;
 
 	default:
@@ -501,9 +442,9 @@
 static void
 bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
 {
-	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
-	bfa_ioc_timer_start(ioc);
-	bfa_ioc_send_disable(ioc);
+	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+	bfa_iocpf_disable(ioc);
+	BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
 }
 
 /**
@@ -515,20 +456,17 @@
 	bfa_trc(ioc, event);
 
 	switch (event) {
-	case IOC_E_FWRSP_DISABLE:
-		bfa_ioc_timer_stop(ioc);
+	case IOC_E_DISABLED:
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
 		break;
 
 	case IOC_E_HWERROR:
-		bfa_ioc_timer_stop(ioc);
 		/*
-		 * !!! fall through !!!
+		 * No state change.  Will move to disabled state
+		 * after iocpf sm completes failure processing and
+		 * moves to disabled state.
 		 */
-
-	case IOC_E_TIMEOUT:
-		bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+		bfa_iocpf_fail(ioc);
 		break;
 
 	default:
@@ -552,19 +490,16 @@
 
 	switch (event) {
 	case IOC_E_ENABLE:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
 		break;
 
 	case IOC_E_DISABLE:
 		ioc->cbfn->disable_cbfn(ioc->bfa);
 		break;
 
-	case IOC_E_FWREADY:
-		break;
-
 	case IOC_E_DETACH:
-		bfa_ioc_firmware_unlock(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		bfa_iocpf_stop(ioc);
 		break;
 
 	default:
@@ -577,7 +512,6 @@
 bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
 {
 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-	bfa_ioc_timer_start(ioc);
 }
 
 /**
@@ -589,19 +523,24 @@
 	bfa_trc(ioc, event);
 
 	switch (event) {
+	case IOC_E_ENABLED:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+		break;
+
+	case IOC_E_FAILED:
+		/**
+		 * Initialization failure during iocpf init retry.
+		 */
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		break;
+
 	case IOC_E_DISABLE:
-		bfa_ioc_timer_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 		break;
 
 	case IOC_E_DETACH:
-		bfa_ioc_timer_stop(ioc);
-		bfa_ioc_firmware_unlock(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
-		break;
-
-	case IOC_E_TIMEOUT:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		bfa_iocpf_stop(ioc);
 		break;
 
 	default:
@@ -611,74 +550,50 @@
 
 
 static void
-bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
+bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
 {
-	struct list_head *qe;
-	struct bfa_ioc_hbfail_notify_s *notify;
-
-	/**
-	 * Mark IOC as failed in hardware and stop firmware.
-	 */
-	bfa_ioc_lpu_stop(ioc);
-	bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
-
-	/**
-	 * Notify other functions on HB failure.
-	 */
-	bfa_ioc_notify_hbfail(ioc);
+	struct list_head			*qe;
+	struct bfa_ioc_hbfail_notify_s	*notify;
+	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 
 	/**
 	 * Notify driver and common modules registered for notification.
 	 */
 	ioc->cbfn->hbfail_cbfn(ioc->bfa);
 	list_for_each(qe, &ioc->hb_notify_q) {
-		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
+		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
 		notify->cbfn(notify->cbarg);
 	}
 
-	/**
-	 * Flush any queued up mailbox requests.
-	 */
-	bfa_ioc_mbox_hbfail(ioc);
-	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
-
-	/**
-	 * Trigger auto-recovery after a delay.
-	 */
-	if (ioc->auto_recover) {
-		bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
-				bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
-	}
+	BFA_LOG(KERN_CRIT, bfad, log_level,
+		"Heartbeat of IOC has failed\n");
 }
 
 /**
- * IOC heartbeat failure.
+ * IOC failure.
  */
 static void
-bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
+bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
 {
 	bfa_trc(ioc, event);
 
 	switch (event) {
 
+	case IOC_E_FAILED:
+		/**
+		 * Initialization failure during iocpf recovery.
+		 * !!! Fall through !!!
+		 */
 	case IOC_E_ENABLE:
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 		break;
 
+	case IOC_E_ENABLED:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
+		break;
+
 	case IOC_E_DISABLE:
-		if (ioc->auto_recover)
-			bfa_ioc_timer_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
-		break;
-
-	case IOC_E_TIMEOUT:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
-		break;
-
-	case IOC_E_FWREADY:
-		/**
-		 * Recovery is already initiated by other function.
-		 */
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 		break;
 
 	case IOC_E_HWERROR:
@@ -686,6 +601,498 @@
 		 * HB failure notification, ignore.
 		 */
 		break;
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+
+
+/**
+ * IOCPF State Machine
+ */
+
+
+/**
+ * Reset entry actions -- initialize state machine
+ */
+static void
+bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
+{
+	iocpf->retry_count = 0;
+	iocpf->auto_recover = bfa_auto_recover;
+}
+
+/**
+ * Beginning state. IOCPF is in reset state.
+ */
+static void
+bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_ENABLE:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
+		break;
+
+	case IOCPF_E_STOP:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/**
+ * Semaphore should be acquired for version check.
+ */
+static void
+bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * Awaiting h/w semaphore to continue with version check.
+ */
+static void
+bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		if (bfa_ioc_firmware_lock(ioc)) {
+			iocpf->retry_count = 0;
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+		} else {
+			bfa_ioc_hw_sem_release(ioc);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
+		}
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_ioc_hw_sem_get_cancel(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		bfa_ioc_pf_disabled(ioc);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_ioc_hw_sem_get_cancel(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/**
+ * Notify enable completion callback.
+ */
+static void
+bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
+{
+	/*
+	 * Call only the first time sm enters fwmismatch state.
+	 */
+	if (iocpf->retry_count == 0)
+		bfa_ioc_pf_fwmismatch(iocpf->ioc);
+
+	iocpf->retry_count++;
+	bfa_iocpf_timer_start(iocpf->ioc);
+}
+
+/**
+ * Awaiting firmware version match.
+ */
+static void
+bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_TIMEOUT:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_iocpf_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		bfa_ioc_pf_disabled(ioc);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_iocpf_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/**
+ * Request for semaphore.
+ */
+static void
+bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/**
+ * Awaiting semaphore for h/w initialization.
+ */
+static void
+bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		iocpf->retry_count = 0;
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_ioc_hw_sem_get_cancel(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+
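+/**
+ * Entry actions: h/w semaphore is held; start the IOCPF timer and
+ * initialize the h/w.
+ */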
+static void
+bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_iocpf_timer_start(iocpf->ioc);
+	bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
+}
+
+/**
+ * Hardware is being initialized. Interrupts are enabled.
+ * Holding hardware semaphore lock.
+ */
+static void
+bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_FWREADY:
+		bfa_iocpf_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
+		break;
+
+	case IOCPF_E_INITFAIL:
+		bfa_iocpf_timer_stop(ioc);
+		/*
+		 * !!! fall through !!!
+		 */
+
+	case IOCPF_E_TIMEOUT:
+		iocpf->retry_count++;
+		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
+			bfa_iocpf_timer_start(ioc);
+			bfa_ioc_reset(ioc, BFA_TRUE);
+			break;
+		}
+
+		bfa_ioc_hw_sem_release(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+
+		if (event == IOCPF_E_TIMEOUT)
+			bfa_ioc_pf_failed(ioc);
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_ioc_hw_sem_release(ioc);
+		bfa_iocpf_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+
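+/**
+ * Entry actions: start the IOCPF timer and send the enable request
+ * to firmware.
+ */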
+static void
+bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_iocpf_timer_start(iocpf->ioc);
+	bfa_ioc_send_enable(iocpf->ioc);
+}
+
+/**
+ * Host IOC function is being enabled, awaiting response from firmware.
+ * Semaphore is acquired.
+ */
+static void
+bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_FWRSP_ENABLE:
+		bfa_iocpf_timer_stop(ioc);
+		bfa_ioc_hw_sem_release(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
+		break;
+
+	case IOCPF_E_INITFAIL:
+		bfa_iocpf_timer_stop(ioc);
+		/*
+		 * !!! fall through !!!
+		 */
+
+	case IOCPF_E_TIMEOUT:
+		iocpf->retry_count++;
+		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
+			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
+				      BFI_IOC_UNINIT);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			break;
+		}
+
+		bfa_ioc_hw_sem_release(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+
+		if (event == IOCPF_E_TIMEOUT)
+			bfa_ioc_pf_failed(ioc);
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_iocpf_timer_stop(ioc);
+		bfa_ioc_hw_sem_release(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
+		break;
+
+	case IOCPF_E_FWREADY:
+		bfa_ioc_send_enable(ioc);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+
+
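+/**
+ * Enable is complete; notify the IOC state machine.
+ */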
+static void
+bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_ioc_pf_enabled(iocpf->ioc);
+}
+
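+/**
+ * IOCPF is initialized and f/w is enabled.
+ */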
+static void
+bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_DISABLE:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
+		break;
+
+	case IOCPF_E_GETATTRFAIL:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+		break;
+
+	case IOCPF_E_FAIL:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		break;
+
+	case IOCPF_E_FWREADY:
+		if (bfa_ioc_is_operational(ioc))
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		else
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+
+		bfa_ioc_pf_failed(ioc);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+
+static void
+bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_iocpf_timer_start(iocpf->ioc);
+	bfa_ioc_send_disable(iocpf->ioc);
+}
+
+/**
+ * IOC is being disabled
+ */
+static void
+bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_FWRSP_DISABLE:
+	case IOCPF_E_FWREADY:
+		bfa_iocpf_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	case IOCPF_E_FAIL:
+		bfa_iocpf_timer_stop(ioc);
+		/*
+		 * !!! fall through !!!
+		 */
+
+	case IOCPF_E_TIMEOUT:
+		bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	case IOCPF_E_FWRSP_ENABLE:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+/**
+ * IOC disable completion entry.
+ */
+static void
+bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_ioc_pf_disabled(iocpf->ioc);
+}
+
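+/**
+ * IOCPF is disabled; awaiting re-enable or stop on driver detach.
+ */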
+static void
+bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_ENABLE:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_ioc_firmware_unlock(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+
+static void
+bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_iocpf_timer_start(iocpf->ioc);
+}
+
+/**
+ * Hardware initialization failed.
+ */
+static void
+bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_DISABLE:
+		bfa_iocpf_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_iocpf_timer_stop(ioc);
+		bfa_ioc_firmware_unlock(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	case IOCPF_E_TIMEOUT:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+
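+/**
+ * Entry actions: stop firmware, notify peer functions of the failure,
+ * flush pending mailbox commands and arm the recovery timer if
+ * auto-recovery is enabled.
+ */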
+static void
+bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
+{
+	/**
+	 * Mark IOC as failed in hardware and stop firmware.
+	 */
+	bfa_ioc_lpu_stop(iocpf->ioc);
+	bfa_reg_write(iocpf->ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
+
+	/**
+	 * Notify other functions on HB failure.
+	 */
+	bfa_ioc_notify_hbfail(iocpf->ioc);
+
+	/**
+	 * Flush any queued up mailbox requests.
+	 */
+	bfa_ioc_mbox_hbfail(iocpf->ioc);
+
+	if (iocpf->auto_recover)
+		bfa_iocpf_recovery_timer_start(iocpf->ioc);
+}
+
+/**
+ * IOC is in failed state.
+ */
+static void
+bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_DISABLE:
+		if (iocpf->auto_recover)
+			bfa_iocpf_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	case IOCPF_E_TIMEOUT:
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+		break;
 
 	default:
 		bfa_sm_fault(ioc, event);
@@ -695,14 +1102,14 @@
 
 
 /**
- *  bfa_ioc_pvt BFA IOC private functions
+ *  hal_ioc_pvt BFA IOC private functions
  */
 
 static void
 bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
 {
-	struct list_head *qe;
-	struct bfa_ioc_hbfail_notify_s *notify;
+	struct list_head			*qe;
+	struct bfa_ioc_hbfail_notify_s	*notify;
 
 	ioc->cbfn->disable_cbfn(ioc->bfa);
 
@@ -710,25 +1117,17 @@
 	 * Notify common modules registered for notification.
 	 */
 	list_for_each(qe, &ioc->hb_notify_q) {
-		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
+		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
 		notify->cbfn(notify->cbarg);
 	}
 }
 
-void
-bfa_ioc_sem_timeout(void *ioc_arg)
-{
-	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;
-
-	bfa_ioc_hw_sem_get(ioc);
-}
-
 bfa_boolean_t
 bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
 {
 	u32 r32;
 	int cnt = 0;
-#define BFA_SEM_SPINCNT 3000
+#define BFA_SEM_SPINCNT	3000
 
 	r32 = bfa_reg_read(sem_reg);
 
@@ -754,7 +1153,7 @@
 static void
 bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
 {
-	u32        r32;
+	u32	r32;
 
 	/**
 	 * First read to the semaphore register will return 0, subsequent reads
@@ -762,12 +1161,11 @@
 	 */
 	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
 	if (r32 == 0) {
-		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
 		return;
 	}
 
-	bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
-			ioc, BFA_IOC_HWSEM_TOV);
+	bfa_sem_timer_start(ioc);
 }
 
 void
@@ -779,7 +1177,7 @@
 static void
 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
 {
-	bfa_timer_stop(&ioc->sem_timer);
+	bfa_sem_timer_stop(ioc);
 }
 
 /**
@@ -788,14 +1186,18 @@
 static void
 bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
 {
-	u32        pss_ctl;
-	int             i;
+	u32	pss_ctl;
+	int		i;
 #define PSS_LMEM_INIT_TIME  10000
 
 	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
 	pss_ctl &= ~__PSS_LMEM_RESET;
 	pss_ctl |= __PSS_LMEM_INIT_EN;
-	pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */
+
+	/*
+	 * i2c workaround 12.5khz clock
+	 */
+	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
 	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
 
 	/**
@@ -821,7 +1223,7 @@
 static void
 bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
 {
-	u32        pss_ctl;
+	u32	pss_ctl;
 
 	/**
 	 * Take processor out of reset.
@@ -835,7 +1237,7 @@
 static void
 bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
 {
-	u32        pss_ctl;
+	u32	pss_ctl;
 
 	/**
 	 * Put processors in reset.
@@ -852,10 +1254,10 @@
 void
 bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
 {
-	u32        pgnum, pgoff;
-	u32        loff = 0;
-	int             i;
-	u32       *fwsig = (u32 *) fwhdr;
+	u32	pgnum, pgoff;
+	u32	loff = 0;
+	int		i;
+	u32	*fwsig = (u32 *) fwhdr;
 
 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
 	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
@@ -863,7 +1265,8 @@
 
 	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
 	     i++) {
-		fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
+		fwsig[i] =
+			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
 		loff += sizeof(u32);
 	}
 }
@@ -875,10 +1278,10 @@
 bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
 {
 	struct bfi_ioc_image_hdr_s *drv_fwhdr;
-	int             i;
+	int i;
 
 	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
-			bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
 
 	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
 		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
@@ -897,21 +1300,20 @@
  * Return true if current running version is valid. Firmware signature and
  * execution context (driver/bios) must match.
  */
-static          bfa_boolean_t
-bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
+static bfa_boolean_t
+bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
 {
 	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
 
 	/**
 	 * If bios/efi boot (flash based) -- return true
 	 */
-	if (bfa_ioc_is_optrom(ioc))
+	if (bfa_ioc_is_bios_optrom(ioc))
 		return BFA_TRUE;
 
 	bfa_ioc_fwver_get(ioc, &fwhdr);
 	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
-			bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
-
+		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
 
 	if (fwhdr.signature != drv_fwhdr->signature) {
 		bfa_trc(ioc, fwhdr.signature);
@@ -919,9 +1321,9 @@
 		return BFA_FALSE;
 	}
 
-	if (fwhdr.exec != drv_fwhdr->exec) {
-		bfa_trc(ioc, fwhdr.exec);
-		bfa_trc(ioc, drv_fwhdr->exec);
+	if (bfa_os_swap32(fwhdr.param) != boot_env) {
+		bfa_trc(ioc, fwhdr.param);
+		bfa_trc(ioc, boot_env);
 		return BFA_FALSE;
 	}
 
@@ -934,7 +1336,7 @@
 static void
 bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
 {
-	u32        r32;
+	u32	r32;
 
 	r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
 	if (r32)
@@ -946,7 +1348,9 @@
 bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 {
 	enum bfi_ioc_state ioc_fwstate;
-	bfa_boolean_t   fwvalid;
+	bfa_boolean_t fwvalid;
+	u32 boot_type;
+	u32 boot_env;
 
 	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
 
@@ -955,14 +1359,33 @@
 
 	bfa_trc(ioc, ioc_fwstate);
 
+	boot_type = BFI_BOOT_TYPE_NORMAL;
+	boot_env = BFI_BOOT_LOADER_OS;
+
+	/**
+	 * Flash based firmware boot BIOS env.
+	 */
+	if (bfa_ioc_is_bios_optrom(ioc)) {
+		boot_type = BFI_BOOT_TYPE_FLASH;
+		boot_env = BFI_BOOT_LOADER_BIOS;
+	}
+
+	/**
+	 * Flash based firmware boot UEFI env.
+	 */
+	if (bfa_ioc_is_uefi(ioc)) {
+		boot_type = BFI_BOOT_TYPE_FLASH;
+		boot_env = BFI_BOOT_LOADER_UEFI;
+	}
+
 	/**
 	 * check if firmware is valid
 	 */
 	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
-			BFA_FALSE : bfa_ioc_fwver_valid(ioc);
+		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
 
 	if (!fwvalid) {
-		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+		bfa_ioc_boot(ioc, boot_type, boot_env);
 		return;
 	}
 
@@ -971,7 +1394,6 @@
 	 * just wait for an initialization completion interrupt.
 	 */
 	if (ioc_fwstate == BFI_IOC_INITING) {
-		bfa_trc(ioc, ioc_fwstate);
 		ioc->cbfn->reset_cbfn(ioc->bfa);
 		return;
 	}
@@ -985,8 +1407,7 @@
 	 * is loaded.
 	 */
 	if (ioc_fwstate == BFI_IOC_DISABLED ||
-		(!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
-		bfa_trc(ioc, ioc_fwstate);
+	    (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
 
 		/**
 		 * When using MSI-X any pending firmware ready event should
@@ -994,20 +1415,20 @@
 		 */
 		bfa_ioc_msgflush(ioc);
 		ioc->cbfn->reset_cbfn(ioc->bfa);
-		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
 		return;
 	}
 
 	/**
 	 * Initialize the h/w for any other states.
 	 */
-	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+	bfa_ioc_boot(ioc, boot_type, boot_env);
 }
 
 static void
 bfa_ioc_timeout(void *ioc_arg)
 {
-	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;
+	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
 
 	bfa_trc(ioc, 0);
 	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
@@ -1016,8 +1437,8 @@
 void
 bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
 {
-	u32       *msgp = (u32 *) ioc_msg;
-	u32        i;
+	u32 *msgp = (u32 *) ioc_msg;
+	u32 i;
 
 	bfa_trc(ioc, msgp[0]);
 	bfa_trc(ioc, len);
@@ -1038,17 +1459,20 @@
 	 * write 1 to mailbox CMD to trigger LPU event
 	 */
 	bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
-	(void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
+	(void) bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
 }
 
 static void
 bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
 {
 	struct bfi_ioc_ctrl_req_s enable_req;
+	struct bfa_timeval_s tv;
 
 	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
 		    bfa_ioc_portid(ioc));
 	enable_req.ioc_class = ioc->ioc_mc;
+	bfa_os_gettimeofday(&tv);
+	enable_req.tv_sec = bfa_os_ntohl(tv.tv_sec);
 	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
 }
 
@@ -1065,7 +1489,7 @@
 static void
 bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
 {
-	struct bfi_ioc_getattr_req_s attr_req;
+	struct bfi_ioc_getattr_req_s	attr_req;
 
 	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
 		    bfa_ioc_portid(ioc));
@@ -1077,12 +1501,11 @@
 bfa_ioc_hb_check(void *cbarg)
 {
 	struct bfa_ioc_s  *ioc = cbarg;
-	u32     hb_count;
+	u32	hb_count;
 
 	hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
 	if (ioc->hb_count == hb_count) {
-		bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE,
-			hb_count);
+		printk(KERN_CRIT "Firmware heartbeat failure at %d\n", hb_count);
 		bfa_ioc_recover(ioc);
 		return;
 	} else {
@@ -1090,61 +1513,54 @@
 	}
 
 	bfa_ioc_mbox_poll(ioc);
-	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check,
-			ioc, BFA_IOC_HB_TOV);
+	bfa_hb_timer_start(ioc);
 }
 
 static void
 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
 {
 	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
-	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
-			BFA_IOC_HB_TOV);
+	bfa_hb_timer_start(ioc);
 }
 
 static void
 bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
 {
-	bfa_timer_stop(&ioc->ioc_timer);
+	bfa_hb_timer_stop(ioc);
 }
 
+
 /**
- *      Initiate a full firmware download.
+ *	Initiate a full firmware download.
  */
 static void
 bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
-		    u32 boot_param)
+		    u32 boot_env)
 {
-	u32       *fwimg;
-	u32        pgnum, pgoff;
-	u32        loff = 0;
-	u32        chunkno = 0;
-	u32        i;
+	u32 *fwimg;
+	u32 pgnum, pgoff;
+	u32 loff = 0;
+	u32 chunkno = 0;
+	u32 i;
 
 	/**
 	 * Initialize LMEM first before code download
 	 */
 	bfa_ioc_lmem_init(ioc);
 
-	/**
-	 * Flash based firmware boot
-	 */
-	bfa_trc(ioc, bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
-	if (bfa_ioc_is_optrom(ioc))
-		boot_type = BFI_BOOT_TYPE_FLASH;
-	fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
-
+	bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
+	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
 
 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
 	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
 
 	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
 
-	for (i = 0; i < bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
+	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
 
 		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
 			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
-			fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
+			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
 		}
 
@@ -1162,7 +1578,8 @@
 		loff = PSS_SMEM_PGOFF(loff);
 		if (loff == 0) {
 			pgnum++;
-			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+			bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
+				      pgnum);
 		}
 	}
 
@@ -1171,11 +1588,11 @@
 
 	/*
-	 * Set boot type and boot param at the end.
-	 */
+	 * Set boot type and boot env at the end.
+	 */
 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
 			bfa_os_swap32(boot_type));
-	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF,
-			bfa_os_swap32(boot_param));
+	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
+			bfa_os_swap32(boot_env));
 }
 
 static void
@@ -1190,11 +1607,11 @@
 static void
 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
 {
-	struct bfi_ioc_attr_s *attr = ioc->attr;
+	struct bfi_ioc_attr_s	*attr = ioc->attr;
 
-	attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
+	attr->adapter_prop  = bfa_os_ntohl(attr->adapter_prop);
 	attr->card_type     = bfa_os_ntohl(attr->card_type);
-	attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);
+	attr->maxfrsize	    = bfa_os_ntohs(attr->maxfrsize);
 
 	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
 }
@@ -1205,8 +1622,8 @@
 static void
 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
 {
-	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
-	int             mc;
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
+	int	mc;
 
 	INIT_LIST_HEAD(&mod->cmd_q);
 	for (mc = 0; mc < BFI_MC_MAX; mc++) {
@@ -1221,9 +1638,9 @@
 static void
 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
 {
-	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
-	struct bfa_mbox_cmd_s *cmd;
-	u32        stat;
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
+	struct bfa_mbox_cmd_s		*cmd;
+	u32			stat;
 
 	/**
 	 * If no command pending, do nothing
@@ -1251,25 +1668,194 @@
 static void
 bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
 {
-	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
-	struct bfa_mbox_cmd_s *cmd;
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
+	struct bfa_mbox_cmd_s		*cmd;
 
 	while (!list_empty(&mod->cmd_q))
 		bfa_q_deq(&mod->cmd_q, &cmd);
 }
 
 /**
- *  bfa_ioc_public
+ * Read data from SMEM to host through PCI memmap
+ *
+ * @param[in]	ioc	memory for IOC
+ * @param[in]	tbuf	app memory to store data from smem
+ * @param[in]	soff	smem offset
+ * @param[in]	sz	size of data to read, in bytes
  */
+static bfa_status_t
+bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
+{
+	u32 pgnum, loff, r32;
+	int i, len;
+	u32 *buf = tbuf;
+
+	pgnum = bfa_ioc_smem_pgnum(ioc, soff);
+	loff = bfa_ioc_smem_pgoff(ioc, soff);
+	bfa_trc(ioc, pgnum);
+	bfa_trc(ioc, loff);
+	bfa_trc(ioc, sz);
+
+	/*
+	 *  Hold semaphore to serialize pll init and fwtrc.
+	 */
+	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
+		bfa_trc(ioc, 0);
+		return BFA_STATUS_FAILED;
+	}
+
+	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+
+	len = sz/sizeof(u32);
+	bfa_trc(ioc, len);
+	for (i = 0; i < len; i++) {
+		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
+		buf[i] = bfa_os_ntohl(r32);
+		loff += sizeof(u32);
+
+		/**
+		 * handle page offset wrap around
+		 */
+		loff = PSS_SMEM_PGOFF(loff);
+		if (loff == 0) {
+			pgnum++;
+			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+		}
+	}
+	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
+		      bfa_ioc_smem_pgnum(ioc, 0));
+	/*
+	 *  release semaphore.
+	 */
+	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+
+	bfa_trc(ioc, pgnum);
+	return BFA_STATUS_OK;
+}
+
+/**
+ * Clear SMEM data from host through PCI memmap
+ *
+ * @param[in]	ioc	memory for IOC
+ * @param[in]	soff	smem offset
+ * @param[in]	sz	size of data to clear, in bytes
+ */
+static bfa_status_t
+bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
+{
+	int i, len;
+	u32 pgnum, loff;
+
+	pgnum = bfa_ioc_smem_pgnum(ioc, soff);
+	loff = bfa_ioc_smem_pgoff(ioc, soff);
+	bfa_trc(ioc, pgnum);
+	bfa_trc(ioc, loff);
+	bfa_trc(ioc, sz);
+
+	/*
+	 *  Hold semaphore to serialize pll init and fwtrc.
+	 */
+	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
+		bfa_trc(ioc, 0);
+		return BFA_STATUS_FAILED;
+	}
+
+	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+
+	len = sz/sizeof(u32); /* len in words */
+	bfa_trc(ioc, len);
+	for (i = 0; i < len; i++) {
+		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
+		loff += sizeof(u32);
+
+		/**
+		 * handle page offset wrap around
+		 */
+		loff = PSS_SMEM_PGOFF(loff);
+		if (loff == 0) {
+			pgnum++;
+			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+		}
+	}
+	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
+		      bfa_ioc_smem_pgnum(ioc, 0));
+
+	/*
+	 *  release semaphore.
+	 */
+	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+	bfa_trc(ioc, pgnum);
+	return BFA_STATUS_OK;
+}
+
+/**
+ * hal iocpf to ioc interface
+ */
+static void
+bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc)
+{
+	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
+}
+
+static void
+bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc)
+{
+	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
+}
+
+static void
+bfa_ioc_pf_failed(struct bfa_ioc_s *ioc)
+{
+	bfa_fsm_send_event(ioc, IOC_E_FAILED);
+}
+
+static void
+bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
+{
+	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+	/**
+	 * Provide enable completion callback.
+	 */
+	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+	BFA_LOG(KERN_WARNING, bfad, log_level,
+		"Running firmware version is incompatible "
+		"with the driver version\n");
+}
+
+
+
+/**
+ *  hal_ioc_public
+ */
+
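+/**
+ * Initialize the ASIC PLL, serialized with the chip init semaphore.
+ */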
+bfa_status_t
+bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
+{
+
+	/*
+	 *  Hold semaphore so that nobody can access the chip during init.
+	 */
+	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+
+	bfa_ioc_pll_init_asic(ioc);
+
+	ioc->pllinit = BFA_TRUE;
+	/*
+	 *  release semaphore.
+	 */
+	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+
+	return BFA_STATUS_OK;
+}
 
 /**
  * Interface used by diag module to do firmware boot with memory test
  * as the entry vector.
  */
 void
-bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
+bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
 {
-	bfa_os_addr_t   rb;
+	bfa_os_addr_t	rb;
 
 	bfa_ioc_stats(ioc, ioc_boots);
 
@@ -1280,7 +1866,7 @@
 	 * Initialize IOC state of all functions on a chip reset.
 	 */
 	rb = ioc->pcidev.pci_bar_kva;
-	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
 		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
 		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
 	} else {
@@ -1289,7 +1875,7 @@
 	}
 
 	bfa_ioc_msgflush(ioc);
-	bfa_ioc_download_fw(ioc, boot_type, boot_param);
+	bfa_ioc_download_fw(ioc, boot_type, boot_env);
 
 	/**
 	 * Enable interrupts just before starting LPU
@@ -1308,18 +1894,29 @@
 }
 
 
+
 bfa_boolean_t
 bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
 {
 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
 }
 
+bfa_boolean_t
+bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
+{
+	u32 r32 = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+
+	return ((r32 != BFI_IOC_UNINIT) &&
+		(r32 != BFI_IOC_INITING) &&
+		(r32 != BFI_IOC_MEMTEST));
+}
+
 void
 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
 {
-	u32       *msgp = mbmsg;
-	u32        r32;
-	int             i;
+	u32	*msgp = mbmsg;
+	u32	r32;
+	int		i;
 
 	/**
 	 * read the MBOX msg
@@ -1341,9 +1938,10 @@
 void
 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
 {
-	union bfi_ioc_i2h_msg_u *msg;
+	union bfi_ioc_i2h_msg_u	*msg;
+	struct bfa_iocpf_s *iocpf = &ioc->iocpf;
 
-	msg = (union bfi_ioc_i2h_msg_u *)m;
+	msg = (union bfi_ioc_i2h_msg_u *) m;
 
 	bfa_ioc_stats(ioc, ioc_isrs);
 
@@ -1352,15 +1950,15 @@
 		break;
 
 	case BFI_IOC_I2H_READY_EVENT:
-		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
+		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
 		break;
 
 	case BFI_IOC_I2H_ENABLE_REPLY:
-		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
+		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
 		break;
 
 	case BFI_IOC_I2H_DISABLE_REPLY:
-		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
+		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
 		break;
 
 	case BFI_IOC_I2H_GETATTR_REPLY:
@@ -1378,29 +1976,24 @@
  *
  * @param[in]	ioc	memory for IOC
  * @param[in]	bfa	driver instance structure
- * @param[in]	trcmod	kernel trace module
- * @param[in]	aen	kernel aen event module
- * @param[in]	logm	kernel logging module
  */
 void
 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
-	       struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
-	       struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
+	       struct bfa_timer_mod_s *timer_mod)
 {
-	ioc->bfa = bfa;
-	ioc->cbfn = cbfn;
-	ioc->timer_mod = timer_mod;
-	ioc->trcmod = trcmod;
-	ioc->aen = aen;
-	ioc->logm = logm;
-	ioc->fcmode = BFA_FALSE;
-	ioc->pllinit = BFA_FALSE;
+	ioc->bfa	= bfa;
+	ioc->cbfn	= cbfn;
+	ioc->timer_mod	= timer_mod;
+	ioc->fcmode	= BFA_FALSE;
+	ioc->pllinit	= BFA_FALSE;
 	ioc->dbg_fwsave_once = BFA_TRUE;
+	ioc->iocpf.ioc	= ioc;
 
 	bfa_ioc_mbox_attach(ioc);
 	INIT_LIST_HEAD(&ioc->hb_notify_q);
 
-	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
+	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+	bfa_fsm_send_event(ioc, IOC_E_RESET);
 }
 
 /**
@@ -1421,10 +2014,10 @@
 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
 		 enum bfi_mclass mc)
 {
-	ioc->ioc_mc = mc;
-	ioc->pcidev = *pcidev;
-	ioc->ctdev  = bfa_asic_id_ct(ioc->pcidev.device_id);
-	ioc->cna = ioc->ctdev && !ioc->fcmode;
+	ioc->ioc_mc	= mc;
+	ioc->pcidev	= *pcidev;
+	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
+	ioc->cna	= ioc->ctdev && !ioc->fcmode;
 
 	/**
 	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
@@ -1445,14 +2038,14 @@
  * @param[in]	dm_pa	physical address of IOC dma memory
  */
 void
-bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
+bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
 {
 	/**
 	 * dma memory for firmware attribute
 	 */
 	ioc->attr_dma.kva = dm_kva;
 	ioc->attr_dma.pa = dm_pa;
-	ioc->attr = (struct bfi_ioc_attr_s *)dm_kva;
+	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
 }
 
 /**
@@ -1490,7 +2083,7 @@
 int
 bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
 {
-return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
+	return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
 }
 
 /**
@@ -1500,8 +2093,8 @@
 void
 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
 {
-	ioc->dbg_fwsave = dbg_fwsave;
-	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
+	ioc->dbg_fwsave	    = dbg_fwsave;
+	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->iocpf.auto_recover);
 }
 
 u32
@@ -1525,8 +2118,8 @@
 void
 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
 {
-	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
-	int             mc;
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
+	int				mc;
 
 	for (mc = 0; mc < BFI_MC_MAX; mc++)
 		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
@@ -1539,10 +2132,10 @@
 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
 		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
 {
-	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
 
-	mod->mbhdlr[mc].cbfn = cbfn;
-	mod->mbhdlr[mc].cbarg = cbarg;
+	mod->mbhdlr[mc].cbfn	= cbfn;
+	mod->mbhdlr[mc].cbarg	= cbarg;
 }
 
 /**
@@ -1555,8 +2148,8 @@
 void
 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
 {
-	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
-	u32        stat;
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
+	u32			stat;
 
 	/**
 	 * If a previous command is pending, queue new command
@@ -1587,9 +2180,9 @@
 void
 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
 {
-	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
-	struct bfi_mbmsg_s m;
-	int             mc;
+	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
+	struct bfi_mbmsg_s		m;
+	int				mc;
 
 	bfa_ioc_msgget(ioc, &m);
 
@@ -1621,16 +2214,14 @@
 	ioc->port_id = bfa_ioc_pcifn(ioc);
 }
 
-#ifndef BFA_BIOS_BUILD
-
 /**
  * return true if IOC is disabled
  */
 bfa_boolean_t
 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
 {
-	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
-		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
+	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
+		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
 }
 
 /**
@@ -1639,9 +2230,9 @@
 bfa_boolean_t
 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
 {
-	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
-		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
-		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
+	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
+		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
+		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
 }
 
 #define bfa_ioc_state_disabled(__sm)		\
@@ -1659,8 +2250,8 @@
 bfa_boolean_t
 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
 {
-	u32        ioc_state;
-	bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;
+	u32	ioc_state;
+	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
 
 	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
 		return BFA_FALSE;
@@ -1669,16 +2260,18 @@
 	if (!bfa_ioc_state_disabled(ioc_state))
 		return BFA_FALSE;
 
-	ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
-	if (!bfa_ioc_state_disabled(ioc_state))
-		return BFA_FALSE;
+	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
+		ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
+		if (!bfa_ioc_state_disabled(ioc_state))
+			return BFA_FALSE;
+	}
 
 	return BFA_TRUE;
 }
 
 /**
  * Add to IOC heartbeat failure notification queue. To be used by common
- * modules such as
+ * modules such as cee, port, diag.
  */
 void
 bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
@@ -1692,7 +2285,7 @@
 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
 			 struct bfa_adapter_attr_s *ad_attr)
 {
-	struct bfi_ioc_attr_s *ioc_attr;
+	struct bfi_ioc_attr_s	*ioc_attr;
 
 	ioc_attr = ioc->attr;
 
@@ -1719,7 +2312,7 @@
 		ad_attr->prototype = 0;
 
 	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
-	ad_attr->mac = bfa_ioc_get_mac(ioc);
+	ad_attr->mac  = bfa_ioc_get_mac(ioc);
 
 	ad_attr->pcie_gen = ioc_attr->pcie_gen;
 	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
@@ -1729,6 +2322,7 @@
 	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
 
 	ad_attr->cna_capable = ioc->cna;
+	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
 }
 
 enum bfa_ioc_type_e
@@ -1782,7 +2376,7 @@
 {
 	bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
 	bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
-		BFA_VERSION_LEN);
+		      BFA_VERSION_LEN);
 }
 
 void
@@ -1795,7 +2389,7 @@
 void
 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
 {
-	struct bfi_ioc_attr_s   *ioc_attr;
+	struct bfi_ioc_attr_s	*ioc_attr;
 
 	bfa_assert(model);
 	bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
@@ -1805,14 +2399,48 @@
 	/**
 	 * model name
 	 */
-	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
-			BFA_MFG_NAME, ioc_attr->card_type);
+	bfa_os_snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
+		BFA_MFG_NAME, ioc_attr->card_type);
 }
 
 enum bfa_ioc_state
 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
 {
-	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+	enum bfa_iocpf_state iocpf_st;
+	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+
+	if (ioc_st == BFA_IOC_ENABLING ||
+		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
+
+		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+
+		switch (iocpf_st) {
+		case BFA_IOCPF_SEMWAIT:
+			ioc_st = BFA_IOC_SEMWAIT;
+			break;
+
+		case BFA_IOCPF_HWINIT:
+			ioc_st = BFA_IOC_HWINIT;
+			break;
+
+		case BFA_IOCPF_FWMISMATCH:
+			ioc_st = BFA_IOC_FWMISMATCH;
+			break;
+
+		case BFA_IOCPF_FAIL:
+			ioc_st = BFA_IOC_FAIL;
+			break;
+
+		case BFA_IOCPF_INITFAIL:
+			ioc_st = BFA_IOC_INITFAIL;
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	return ioc_st;
 }
 
 void
@@ -1833,7 +2461,7 @@
 }
 
 /**
- *  bfa_wwn_public
+ *  hal_wwn_public
  */
 wwn_t
 bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
@@ -1857,10 +2485,10 @@
 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
 {
 	/*
-	 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
+	 * Check the IOC type and return the appropriate MAC
 	 */
 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
-		return bfa_ioc_get_mfg_mac(ioc);
+		return ioc->attr->fcoe_mac;
 	else
 		return ioc->attr->mac;
 }
@@ -1880,12 +2508,16 @@
 mac_t
 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
 {
-	mac_t   mac;
+	mac_t	m;
 
-	mac = ioc->attr->mfg_mac;
-	mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
+	m = ioc->attr->mfg_mac;
+	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
+		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
+	else
+		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
+			bfa_ioc_pcifn(ioc));
 
-	return mac;
+	return m;
 }
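
The else-branch above relies on bfa_mfg_increment_wwn_mac(), defined in a header outside this diff. A plausible reading of its semantics, assuming a carrying add across the low three MAC octets rather than a bump of the last octet only:

/*
 * Illustrative sketch of the presumed bfa_mfg_increment_wwn_mac()
 * behavior; the real macro lives elsewhere and may differ.
 */
static void
example_increment_mac(u8 *mac, u8 inc)
{
	u32 t = (mac[0] << 16) | (mac[1] << 8) | mac[2];

	t += inc;			/* carry propagates across octets */
	mac[0] = (t >> 16) & 0xff;
	mac[1] = (t >> 8) & 0xff;
	mac[2] = t & 0xff;
}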
 
 bfa_boolean_t
@@ -1895,46 +2527,12 @@
 }
 
 /**
- * Send AEN notification
- */
-void
-bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
-{
-	union bfa_aen_data_u aen_data;
-	struct bfa_log_mod_s *logmod = ioc->logm;
-	s32         inst_num = 0;
-	enum bfa_ioc_type_e ioc_type;
-
-	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);
-
-	memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
-	memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
-	ioc_type = bfa_ioc_get_type(ioc);
-	switch (ioc_type) {
-	case BFA_IOC_TYPE_FC:
-		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
-		break;
-	case BFA_IOC_TYPE_FCoE:
-		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
-		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
-		break;
-	case BFA_IOC_TYPE_LL:
-		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
-		break;
-	default:
-		bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
-		break;
-	}
-	aen_data.ioc.ioc_type = ioc_type;
-}
-
-/**
  * Retrieve saved firmware trace from a prior IOC failure.
  */
 bfa_status_t
 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
 {
-	int             tlen;
+	int	tlen;
 
 	if (ioc->dbg_fwsave_len == 0)
 		return BFA_STATUS_ENOFSAVE;
@@ -1963,57 +2561,145 @@
 bfa_status_t
 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
 {
-	u32        pgnum;
-	u32        loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
-	int             i, tlen;
-	u32       *tbuf = trcdata, r32;
+	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
+	int tlen;
+	bfa_status_t status;
 
 	bfa_trc(ioc, *trclen);
 
-	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
-	loff = bfa_ioc_smem_pgoff(ioc, loff);
-
-	/*
-	 *  Hold semaphore to serialize pll init and fwtrc.
-	 */
-	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
-		return BFA_STATUS_FAILED;
-
-	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
-
 	tlen = *trclen;
 	if (tlen > BFA_DBG_FWTRC_LEN)
 		tlen = BFA_DBG_FWTRC_LEN;
-	tlen /= sizeof(u32);
 
-	bfa_trc(ioc, tlen);
+	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
+	*trclen = tlen;
+	return status;
+}
 
-	for (i = 0; i < tlen; i++) {
-		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
-		tbuf[i] = bfa_os_ntohl(r32);
-		loff += sizeof(u32);
+static void
+bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
+{
+	struct bfa_mbox_cmd_s cmd;
+	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
 
-		/**
-		 * handle page offset wrap around
-		 */
-		loff = PSS_SMEM_PGOFF(loff);
-		if (loff == 0) {
-			pgnum++;
-			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
-		}
-	}
-	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
-		      bfa_ioc_smem_pgnum(ioc, 0));
+	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
+		    bfa_ioc_portid(ioc));
+	req->ioc_class = ioc->ioc_mc;
+	bfa_ioc_mbox_queue(ioc, &cmd);
+}
 
-	/*
-	 *  release semaphore.
+static void
+bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
+{
+	u32 fwsync_iter = 1000;
+
+	bfa_ioc_send_fwsync(ioc);
+
+	/**
+	 * After sending a fw sync mbox command, wait for it to
+	 * take effect.  We will not wait for a response because
+	 *    1. fw_sync mbox cmd doesn't have a response.
+	 *    2. Even if we implement that, interrupts might not
+	 *	 be enabled when we call this function.
+	 * So, just keep checking if any mbox cmd is pending, and
+	 * after waiting for a reasonable amount of time, go ahead.
+	 * It is possible that fw has crashed and the mbox command
+	 * is never acknowledged.
 	 */
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
+		fwsync_iter--;
+}
 
-	bfa_trc(ioc, pgnum);
+/**
+ * Dump firmware smem
+ */
+bfa_status_t
+bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
+				u32 *offset, int *buflen)
+{
+	u32 loff;
+	int dlen;
+	bfa_status_t status;
+	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
 
-	*trclen = tlen * sizeof(u32);
-	return BFA_STATUS_OK;
+	if (*offset >= smem_len) {
+		*offset = *buflen = 0;
+		return BFA_STATUS_EINVAL;
+	}
+
+	loff = *offset;
+	dlen = *buflen;
+
+	/**
+	 * On the first smem read, sync smem before proceeding;
+	 * there is no need to sync before reading every chunk.
+	 */
+	if (loff == 0)
+		bfa_ioc_fwsync(ioc);
+
+	if ((loff + dlen) >= smem_len)
+		dlen = smem_len - loff;
+
+	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
+
+	if (status != BFA_STATUS_OK) {
+		*offset = *buflen = 0;
+		return status;
+	}
+
+	*offset += dlen;
+
+	if (*offset >= smem_len)
+		*offset = 0;
+
+	*buflen = dlen;
+
+	return status;
+}
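
bfa_ioc_debug_fwcore() is meant to be called repeatedly: *offset advances by the amount read and wraps to zero once the end of smem is reached. A hypothetical drain loop, assuming a destination buffer sized to BFA_IOC_FW_SMEM_SIZE(ioc); only the bfa_ioc_debug_fwcore() call is from this file:

static bfa_status_t
example_dump_fwcore(struct bfa_ioc_s *ioc, u8 *dst)
{
	u32 off = 0;
	bfa_status_t status;

	do {
		int chunk = 4096;	/* trimmed by the callee at smem end */

		status = bfa_ioc_debug_fwcore(ioc, dst + off, &off, &chunk);
		if (status != BFA_STATUS_OK)
			return status;
	} while (off != 0);	/* offset wraps to 0 after the last chunk */

	return BFA_STATUS_OK;
}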
+
+/**
+ * Firmware statistics
+ */
+bfa_status_t
+bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
+{
+	u32 loff = BFI_IOC_FWSTATS_OFF +
+		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
+	int tlen;
+	bfa_status_t status;
+
+	if (ioc->stats_busy) {
+		bfa_trc(ioc, ioc->stats_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+	ioc->stats_busy = BFA_TRUE;
+
+	tlen = sizeof(struct bfa_fw_stats_s);
+	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
+
+	ioc->stats_busy = BFA_FALSE;
+	return status;
+}
+
+bfa_status_t
+bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
+{
+	u32 loff = BFI_IOC_FWSTATS_OFF +
+		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
+	int tlen;
+	bfa_status_t status;
+
+	if (ioc->stats_busy) {
+		bfa_trc(ioc, ioc->stats_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+	ioc->stats_busy = BFA_TRUE;
+
+	tlen = sizeof(struct bfa_fw_stats_s);
+	status = bfa_ioc_smem_clr(ioc, loff, tlen);
+
+	ioc->stats_busy = BFA_FALSE;
+	return status;
 }
 
 /**
@@ -2022,7 +2708,7 @@
 static void
 bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
 {
-	int             tlen;
+	int		tlen;
 
 	if (ioc->dbg_fwsave_len) {
 		tlen = ioc->dbg_fwsave_len;
@@ -2050,11 +2736,135 @@
 {
 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
 		return;
-
-	if (ioc->attr->nwwn == 0)
-		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
-	if (ioc->attr->pwwn == 0)
-		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
 }
 
-#endif
+/**
+ *  hal_iocpf_pvt BFA IOC PF private functions
+ */
+
+static void
+bfa_iocpf_enable(struct bfa_ioc_s *ioc)
+{
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
+}
+
+static void
+bfa_iocpf_disable(struct bfa_ioc_s *ioc)
+{
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
+}
+
+static void
+bfa_iocpf_fail(struct bfa_ioc_s *ioc)
+{
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
+}
+
+static void
+bfa_iocpf_initfail(struct bfa_ioc_s *ioc)
+{
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
+}
+
+static void
+bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc)
+{
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
+}
+
+static void
+bfa_iocpf_stop(struct bfa_ioc_s *ioc)
+{
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+}
+
+static void
+bfa_iocpf_timeout(void *ioc_arg)
+{
+	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+	bfa_trc(ioc, 0);
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
+}
+
+static void
+bfa_iocpf_sem_timeout(void *ioc_arg)
+{
+	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+	bfa_ioc_hw_sem_get(ioc);
+}
+
+/**
+ *  bfa timer function
+ */
+void
+bfa_timer_init(struct bfa_timer_mod_s *mod)
+{
+	INIT_LIST_HEAD(&mod->timer_q);
+}
+
+void
+bfa_timer_beat(struct bfa_timer_mod_s *mod)
+{
+	struct list_head *qh = &mod->timer_q;
+	struct list_head *qe, *qe_next;
+	struct bfa_timer_s *elem;
+	struct list_head timedout_q;
+
+	INIT_LIST_HEAD(&timedout_q);
+
+	qe = bfa_q_next(qh);
+
+	while (qe != qh) {
+		qe_next = bfa_q_next(qe);
+
+		elem = (struct bfa_timer_s *) qe;
+		if (elem->timeout <= BFA_TIMER_FREQ) {
+			elem->timeout = 0;
+			list_del(&elem->qe);
+			list_add_tail(&elem->qe, &timedout_q);
+		} else {
+			elem->timeout -= BFA_TIMER_FREQ;
+		}
+
+		qe = qe_next;	/* go to next elem */
+	}
+
+	/*
+	 * Pop all the timeout entries
+	 */
+	while (!list_empty(&timedout_q)) {
+		bfa_q_deq(&timedout_q, &elem);
+		elem->timercb(elem->arg);
+	}
+}
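
The timer module above is a simple software scheme: the host is expected to invoke bfa_timer_beat() once every BFA_TIMER_FREQ milliseconds; each beat debits every armed timer and fires the ones that reach zero, removing them from the queue (timers are one-shot). A hypothetical arming sketch; everything except the bfa_timer_* calls is made up:

static void
example_expired(void *arg)
{
	/* arg is whatever was handed to bfa_timer_begin() */
}

static void
example_arm(struct bfa_timer_mod_s *mod, struct bfa_timer_s *tmr, void *arg)
{
	/* fires once after roughly 3000 msecs worth of beats */
	bfa_timer_begin(mod, tmr, example_expired, arg, 3000);
}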
+
+/**
+ * Should be called with lock protection
+ */
+void
+bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
+		    void (*timercb) (void *), void *arg, unsigned int timeout)
+{
+	bfa_assert(timercb != NULL);
+	bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
+
+	timer->timeout = timeout;
+	timer->timercb = timercb;
+	timer->arg = arg;
+
+	list_add_tail(&timer->qe, &mod->timer_q);
+}
+
+/**
+ * Should be called with lock protection
+ */
+void
+bfa_timer_stop(struct bfa_timer_s *timer)
+{
+	bfa_assert(!list_empty(&timer->qe));
+
+	list_del(&timer->qe);
+}