| /******************************************************************* | 
 |  * This file is part of the Emulex Linux Device Driver for         * | 
 |  * Fibre Channel Host Bus Adapters.                                * | 
 |  * Copyright (C) 2004-2008 Emulex.  All rights reserved.           * | 
 |  * EMULEX and SLI are trademarks of Emulex.                        * | 
 |  * www.emulex.com                                                  * | 
 |  * Portions Copyright (C) 2004-2005 Christoph Hellwig              * | 
 |  *                                                                 * | 
 |  * This program is free software; you can redistribute it and/or   * | 
 |  * modify it under the terms of version 2 of the GNU General       * | 
 |  * Public License as published by the Free Software Foundation.    * | 
 |  * This program is distributed in the hope that it will be useful. * | 
 |  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          * | 
 |  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  * | 
 |  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      * | 
 |  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | 
 |  * TO BE LEGALLY INVALID.  See the GNU General Public License for  * | 
 |  * more details, a copy of which can be found in the file COPYING  * | 
 |  * included with this package.                                     * | 
 |  *******************************************************************/ | 
 |  | 
 | #include <linux/blkdev.h> | 
 | #include <linux/delay.h> | 
 | #include <linux/dma-mapping.h> | 
 | #include <linux/idr.h> | 
 | #include <linux/interrupt.h> | 
 | #include <linux/kthread.h> | 
 | #include <linux/pci.h> | 
 | #include <linux/spinlock.h> | 
 | #include <linux/ctype.h> | 
 |  | 
 | #include <scsi/scsi.h> | 
 | #include <scsi/scsi_device.h> | 
 | #include <scsi/scsi_host.h> | 
 | #include <scsi/scsi_transport_fc.h> | 
 |  | 
 | #include "lpfc_hw.h" | 
 | #include "lpfc_sli.h" | 
 | #include "lpfc_disc.h" | 
 | #include "lpfc_scsi.h" | 
 | #include "lpfc.h" | 
 | #include "lpfc_logmsg.h" | 
 | #include "lpfc_crtn.h" | 
 | #include "lpfc_vport.h" | 
 | #include "lpfc_version.h" | 
 |  | 
 | static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); | 
 | static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); | 
 | static int lpfc_post_rcv_buf(struct lpfc_hba *); | 
 |  | 
 | static struct scsi_transport_template *lpfc_transport_template = NULL; | 
 | static struct scsi_transport_template *lpfc_vport_transport_template = NULL; | 
 | static DEFINE_IDR(lpfc_hba_index); | 
 |  | 
 | /************************************************************************/ | 
 | /*                                                                      */ | 
 | /*    lpfc_config_port_prep                                             */ | 
 | /*    This routine will do LPFC initialization prior to the             */ | 
 | /*    CONFIG_PORT mailbox command. This will be initialized             */ | 
 | /*    as a SLI layer callback routine.                                  */ | 
 | /*    This routine returns 0 on success or -ERESTART if it wants        */ | 
 | /*    the SLI layer to reset the HBA and try again. Any                 */ | 
 | /*    other return value indicates an error.                            */ | 
 | /*                                                                      */ | 
 | /************************************************************************/ | 
 | int | 
 | lpfc_config_port_prep(struct lpfc_hba *phba) | 
 | { | 
 | 	lpfc_vpd_t *vp = &phba->vpd; | 
 | 	int i = 0, rc; | 
 | 	LPFC_MBOXQ_t *pmb; | 
 | 	MAILBOX_t *mb; | 
 | 	char *lpfc_vpd_data = NULL; | 
 | 	uint16_t offset = 0; | 
 | 	static char licensed[56] = | 
 | 		    "key unlock for use with gnu public licensed code only\0"; | 
 | 	static int init_key = 1; | 
 |  | 
 | 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 
 | 	if (!pmb) { | 
 | 		phba->link_state = LPFC_HBA_ERROR; | 
 | 		return -ENOMEM; | 
 | 	} | 
 |  | 
 | 	mb = &pmb->mb; | 
 | 	phba->link_state = LPFC_INIT_MBX_CMDS; | 
 |  | 
 | 	if (lpfc_is_LC_HBA(phba->pcidev->device)) { | 
 | 		if (init_key) { | 
 | 			uint32_t *ptext = (uint32_t *) licensed; | 
 |  | 
 | 			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) | 
 | 				*ptext = cpu_to_be32(*ptext); | 
 | 			init_key = 0; | 
 | 		} | 
 |  | 
 | 		lpfc_read_nv(phba, pmb); | 
 | 		memset((char*)mb->un.varRDnvp.rsvd3, 0, | 
 | 			sizeof (mb->un.varRDnvp.rsvd3)); | 
 | 		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, | 
 | 			 sizeof (licensed)); | 
 |  | 
 | 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); | 
 |  | 
 | 		if (rc != MBX_SUCCESS) { | 
 | 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | 
 | 					"0324 Config Port initialization " | 
 | 					"error, mbxCmd x%x READ_NVPARM, " | 
 | 					"mbxStatus x%x\n", | 
 | 					mb->mbxCommand, mb->mbxStatus); | 
 | 			mempool_free(pmb, phba->mbox_mem_pool); | 
 | 			return -ERESTART; | 
 | 		} | 
 | 		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, | 
 | 		       sizeof(phba->wwnn)); | 
 | 		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, | 
 | 		       sizeof(phba->wwpn)); | 
 | 	} | 
 |  | 
 | 	phba->sli3_options = 0x0; | 
 |  | 
 | 	/* Setup and issue mailbox READ REV command */ | 
 | 	lpfc_read_rev(phba, pmb); | 
 | 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); | 
 | 	if (rc != MBX_SUCCESS) { | 
 | 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 
 | 				"0439 Adapter failed to init, mbxCmd x%x " | 
 | 				"READ_REV, mbxStatus x%x\n", | 
 | 				mb->mbxCommand, mb->mbxStatus); | 
 | 		mempool_free( pmb, phba->mbox_mem_pool); | 
 | 		return -ERESTART; | 
 | 	} | 
 |  | 
 |  | 
 | 	/* | 
 | 	 * The value of rr must be 1 since the driver set the cv field to 1. | 
 | 	 * This setting requires the FW to set all revision fields. | 
 | 	 */ | 
 | 	if (mb->un.varRdRev.rr == 0) { | 
 | 		vp->rev.rBit = 0; | 
 | 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 
 | 				"0440 Adapter failed to init, READ_REV has " | 
 | 				"missing revision information.\n"); | 
 | 		mempool_free(pmb, phba->mbox_mem_pool); | 
 | 		return -ERESTART; | 
 | 	} | 
 |  | 
 | 	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) { | 
 | 		mempool_free(pmb, phba->mbox_mem_pool); | 
 | 		return -EINVAL; | 
 | 	} | 
 |  | 
 | 	/* Save information as VPD data */ | 
 | 	vp->rev.rBit = 1; | 
 | 	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); | 
 | 	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; | 
 | 	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); | 
 | 	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; | 
 | 	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); | 
 | 	vp->rev.biuRev = mb->un.varRdRev.biuRev; | 
 | 	vp->rev.smRev = mb->un.varRdRev.smRev; | 
 | 	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; | 
 | 	vp->rev.endecRev = mb->un.varRdRev.endecRev; | 
 | 	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; | 
 | 	vp->rev.fcphLow = mb->un.varRdRev.fcphLow; | 
 | 	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; | 
 | 	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; | 
 | 	vp->rev.postKernRev = mb->un.varRdRev.postKernRev; | 
 | 	vp->rev.opFwRev = mb->un.varRdRev.opFwRev; | 
 |  | 
 | 	/* If the sli feature level is less than 9, we must | 
 | 	 * tear down all RPIs and VPIs on link down if NPIV | 
 | 	 * is enabled. | 
 | 	 */ | 
 | 	if (vp->rev.feaLevelHigh < 9) | 
 | 		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; | 
 |  | 
 | 	if (lpfc_is_LC_HBA(phba->pcidev->device)) | 
 | 		memcpy(phba->RandomData, (char *)&mb->un.varWords[24], | 
 | 						sizeof (phba->RandomData)); | 
 |  | 
 | 	/* Get adapter VPD information */ | 
 | 	pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL); | 
 | 	if (!pmb->context2) | 
 | 		goto out_free_mbox; | 
 | 	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); | 
 | 	if (!lpfc_vpd_data) | 
 | 		goto out_free_context2; | 
 |  | 
 | 	do { | 
 | 		lpfc_dump_mem(phba, pmb, offset); | 
 | 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); | 
 |  | 
 | 		if (rc != MBX_SUCCESS) { | 
 | 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 
 | 					"0441 VPD not present on adapter, " | 
 | 					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n", | 
 | 					mb->mbxCommand, mb->mbxStatus); | 
 | 			mb->un.varDmp.word_cnt = 0; | 
 | 		} | 
 | 		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) | 
 | 			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; | 
 | 		lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset, | 
 | 				      mb->un.varDmp.word_cnt); | 
 | 		offset += mb->un.varDmp.word_cnt; | 
 | 	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); | 
 | 	lpfc_parse_vpd(phba, lpfc_vpd_data, offset); | 
 |  | 
 | 	kfree(lpfc_vpd_data); | 
 | out_free_context2: | 
 | 	kfree(pmb->context2); | 
 | out_free_mbox: | 
 | 	mempool_free(pmb, phba->mbox_mem_pool); | 
 | 	return 0; | 
 | } | 
 |  | 
 | /* Completion handler for config async event mailbox command. */ | 
 | static void | 
 | lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) | 
 | { | 
 | 	if (pmboxq->mb.mbxStatus == MBX_SUCCESS) | 
 | 		phba->temp_sensor_support = 1; | 
 | 	else | 
 | 		phba->temp_sensor_support = 0; | 
 | 	mempool_free(pmboxq, phba->mbox_mem_pool); | 
 | 	return; | 
 | } | 
 |  | 
 | /************************************************************************/ | 
 | /*                                                                      */ | 
 | /*    lpfc_config_port_post                                             */ | 
 | /*    This routine will do LPFC initialization after the                */ | 
 | /*    CONFIG_PORT mailbox command. This will be initialized             */ | 
 | /*    as a SLI layer callback routine.                                  */ | 
 | /*    This routine returns 0 on success. Any other return value         */ | 
 | /*    indicates an error.                                               */ | 
 | /*                                                                      */ | 
 | /************************************************************************/ | 
 | int | 
 | lpfc_config_port_post(struct lpfc_hba *phba) | 
 | { | 
 | 	struct lpfc_vport *vport = phba->pport; | 
 | 	LPFC_MBOXQ_t *pmb; | 
 | 	MAILBOX_t *mb; | 
 | 	struct lpfc_dmabuf *mp; | 
 | 	struct lpfc_sli *psli = &phba->sli; | 
 | 	uint32_t status, timeout; | 
 | 	int i, j; | 
 | 	int rc; | 
 |  | 
 | 	spin_lock_irq(&phba->hbalock); | 
 | 	/* | 
 | 	 * If the CONFIG_PORT mailbox command completed correctly, | 
 | 	 * the HBA is no longer overheated. | 
 | 	 */ | 
 | 	if (phba->over_temp_state == HBA_OVER_TEMP) | 
 | 		phba->over_temp_state = HBA_NORMAL_TEMP; | 
 | 	spin_unlock_irq(&phba->hbalock); | 
 |  | 
 | 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 
 | 	if (!pmb) { | 
 | 		phba->link_state = LPFC_HBA_ERROR; | 
 | 		return -ENOMEM; | 
 | 	} | 
 | 	mb = &pmb->mb; | 
 |  | 
 | 	/* Get login parameters for NID.  */ | 
 | 	lpfc_read_sparam(phba, pmb, 0); | 
 | 	pmb->vport = vport; | 
 | 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { | 
 | 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 
 | 				"0448 Adapter failed init, mbxCmd x%x " | 
 | 				"READ_SPARM mbxStatus x%x\n", | 
 | 				mb->mbxCommand, mb->mbxStatus); | 
 | 		phba->link_state = LPFC_HBA_ERROR; | 
 | 		mp = (struct lpfc_dmabuf *) pmb->context1; | 
 | 		mempool_free( pmb, phba->mbox_mem_pool); | 
 | 		lpfc_mbuf_free(phba, mp->virt, mp->phys); | 
 | 		kfree(mp); | 
 | 		return -EIO; | 
 | 	} | 
 |  | 
 | 	mp = (struct lpfc_dmabuf *) pmb->context1; | 
 |  | 
 | 	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); | 
 | 	lpfc_mbuf_free(phba, mp->virt, mp->phys); | 
 | 	kfree(mp); | 
 | 	pmb->context1 = NULL; | 
 |  | 
 | 	if (phba->cfg_soft_wwnn) | 
 | 		u64_to_wwn(phba->cfg_soft_wwnn, | 
 | 			   vport->fc_sparam.nodeName.u.wwn); | 
 | 	if (phba->cfg_soft_wwpn) | 
 | 		u64_to_wwn(phba->cfg_soft_wwpn, | 
 | 			   vport->fc_sparam.portName.u.wwn); | 
 | 	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, | 
 | 	       sizeof (struct lpfc_name)); | 
 | 	memcpy(&vport->fc_portname, &vport->fc_sparam.portName, | 
 | 	       sizeof (struct lpfc_name)); | 
 | 	/* If no serial number in VPD data, use low 6 bytes of WWNN */ | 
 | 	/* This should be consolidated into parse_vpd ? - mr */ | 
 | 	if (phba->SerialNumber[0] == 0) { | 
 | 		uint8_t *outptr; | 
 |  | 
 | 		outptr = &vport->fc_nodename.u.s.IEEE[0]; | 
 | 		for (i = 0; i < 12; i++) { | 
 | 			status = *outptr++; | 
 | 			j = ((status & 0xf0) >> 4); | 
 | 			if (j <= 9) | 
 | 				phba->SerialNumber[i] = | 
 | 				    (char)((uint8_t) 0x30 + (uint8_t) j); | 
 | 			else | 
 | 				phba->SerialNumber[i] = | 
 | 				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); | 
 | 			i++; | 
 | 			j = (status & 0xf); | 
 | 			if (j <= 9) | 
 | 				phba->SerialNumber[i] = | 
 | 				    (char)((uint8_t) 0x30 + (uint8_t) j); | 
 | 			else | 
 | 				phba->SerialNumber[i] = | 
 | 				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); | 
 | 		} | 
 | 	} | 
 |  | 
 | 	lpfc_read_config(phba, pmb); | 
 | 	pmb->vport = vport; | 
 | 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { | 
 | 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 
 | 				"0453 Adapter failed to init, mbxCmd x%x " | 
 | 				"READ_CONFIG, mbxStatus x%x\n", | 
 | 				mb->mbxCommand, mb->mbxStatus); | 
 | 		phba->link_state = LPFC_HBA_ERROR; | 
 | 		mempool_free( pmb, phba->mbox_mem_pool); | 
 | 		return -EIO; | 
 | 	} | 
 |  | 
 | 	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */ | 
 | 	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) | 
 | 		phba->cfg_hba_queue_depth = | 
 | 			mb->un.varRdConfig.max_xri + 1; | 
 |  | 
 | 	phba->lmt = mb->un.varRdConfig.lmt; | 
 |  | 
 | 	/* Get the default values for Model Name and Description */ | 
 | 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); | 
 |  | 
 | 	if ((phba->cfg_link_speed > LINK_SPEED_10G) | 
 | 	    || ((phba->cfg_link_speed == LINK_SPEED_1G) | 
 | 		&& !(phba->lmt & LMT_1Gb)) | 
 | 	    || ((phba->cfg_link_speed == LINK_SPEED_2G) | 
 | 		&& !(phba->lmt & LMT_2Gb)) | 
 | 	    || ((phba->cfg_link_speed == LINK_SPEED_4G) | 
 | 		&& !(phba->lmt & LMT_4Gb)) | 
 | 	    || ((phba->cfg_link_speed == LINK_SPEED_8G) | 
 | 		&& !(phba->lmt & LMT_8Gb)) | 
 | 	    || ((phba->cfg_link_speed == LINK_SPEED_10G) | 
 | 		&& !(phba->lmt & LMT_10Gb))) { | 
 | 		/* Reset link speed to auto */ | 
 | 		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, | 
 | 			"1302 Invalid speed for this board: " | 
 | 			"Reset link speed to auto: x%x\n", | 
 | 			phba->cfg_link_speed); | 
 | 		phba->cfg_link_speed = LINK_SPEED_AUTO; | 
 | 	} | 
 |  | 
 | 	phba->link_state = LPFC_LINK_DOWN; | 
 |  | 
 | 	/* Only process IOCBs on ELS ring till hba_state is READY */ | 
 | 	if (psli->ring[psli->extra_ring].cmdringaddr) | 
 | 		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; | 
 | 	if (psli->ring[psli->fcp_ring].cmdringaddr) | 
 | 		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; | 
 | 	if (psli->ring[psli->next_ring].cmdringaddr) | 
 | 		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; | 
 |  | 
 | 	/* Post receive buffers for desired rings */ | 
 | 	if (phba->sli_rev != 3) | 
 | 		lpfc_post_rcv_buf(phba); | 
 |  | 
 | 	/* Enable appropriate host interrupts */ | 
 | 	spin_lock_irq(&phba->hbalock); | 
 | 	status = readl(phba->HCregaddr); | 
 | 	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; | 
 | 	if (psli->num_rings > 0) | 
 | 		status |= HC_R0INT_ENA; | 
 | 	if (psli->num_rings > 1) | 
 | 		status |= HC_R1INT_ENA; | 
 | 	if (psli->num_rings > 2) | 
 | 		status |= HC_R2INT_ENA; | 
 | 	if (psli->num_rings > 3) | 
 | 		status |= HC_R3INT_ENA; | 
 |  | 
 | 	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && | 
 | 	    (phba->cfg_poll & DISABLE_FCP_RING_INT)) | 
 | 		status &= ~(HC_R0INT_ENA << LPFC_FCP_RING); | 
 |  | 
 | 	writel(status, phba->HCregaddr); | 
 | 	readl(phba->HCregaddr); /* flush */ | 
 | 	spin_unlock_irq(&phba->hbalock); | 
 |  | 
 | 	/* | 
 | 	 * Set up the ring 0 (ELS) timeout handler | 
 | 	 */ | 
 | 	timeout = phba->fc_ratov << 1; | 
 | 	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); | 
 | 	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); | 
 | 	phba->hb_outstanding = 0; | 
 | 	phba->last_completion_time = jiffies; | 
 |  | 
 | 	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); | 
 | 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | 
 | 	pmb->vport = vport; | 
 | 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); | 
 | 	lpfc_set_loopback_flag(phba); | 
 | 	if (rc != MBX_SUCCESS) { | 
 | 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 
 | 				"0454 Adapter failed to init, mbxCmd x%x " | 
 | 				"INIT_LINK, mbxStatus x%x\n", | 
 | 				mb->mbxCommand, mb->mbxStatus); | 
 |  | 
 | 		/* Clear all interrupt enable conditions */ | 
 | 		writel(0, phba->HCregaddr); | 
 | 		readl(phba->HCregaddr); /* flush */ | 
 | 		/* Clear all pending interrupts */ | 
 | 		writel(0xffffffff, phba->HAregaddr); | 
 | 		readl(phba->HAregaddr); /* flush */ | 
 |  | 
 | 		phba->link_state = LPFC_HBA_ERROR; | 
 | 		if (rc != MBX_BUSY) | 
 | 			mempool_free(pmb, phba->mbox_mem_pool); | 
 | 		return -EIO; | 
 | 	} | 
 | 	/* MBOX buffer will be freed in mbox compl */ | 
 | 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 
 | 	if (!pmb) { | 
 | 		phba->link_state = LPFC_HBA_ERROR; | 
 | 		return -ENOMEM; | 
 | 	} | 
 | 	lpfc_config_async(phba, pmb, LPFC_ELS_RING); | 
 | 	pmb->mbox_cmpl = lpfc_config_async_cmpl; | 
 | 	pmb->vport = phba->pport; | 
 | 	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); | 
 |  | 
 | 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { | 
 | 		lpfc_printf_log(phba, | 
 | 				KERN_ERR, | 
 | 				LOG_INIT, | 
 | 				"0456 Adapter failed to issue " | 
 | 				"ASYNCEVT_ENABLE mbox status x%x\n", | 
 | 				rc); | 
 | 		mempool_free(pmb, phba->mbox_mem_pool); | 
 | 	} | 
 | 	return 0; | 
 | } | 
 |  | 
 | /************************************************************************/ | 
 | /*                                                                      */ | 
 | /*    lpfc_hba_down_prep                                                */ | 
 | /*    This routine will do LPFC uninitialization before the             */ | 
 | /*    HBA is reset when bringing down the SLI Layer. This will be       */ | 
 | /*    initialized as a SLI layer callback routine.                      */ | 
 | /*    This routine returns 0 on success. Any other return value         */ | 
 | /*    indicates an error.                                               */ | 
 | /*                                                                      */ | 
 | /************************************************************************/ | 
 | int | 
 | lpfc_hba_down_prep(struct lpfc_hba *phba) | 
 | { | 
 | 	struct lpfc_vport **vports; | 
 | 	int i; | 
 | 	/* Disable interrupts */ | 
 | 	writel(0, phba->HCregaddr); | 
 | 	readl(phba->HCregaddr); /* flush */ | 
 |  | 
 | 	if (phba->pport->load_flag & FC_UNLOADING) | 
 | 		lpfc_cleanup_discovery_resources(phba->pport); | 
 | 	else { | 
 | 		vports = lpfc_create_vport_work_array(phba); | 
 | 		if (vports != NULL) | 
 | 			for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) | 
 | 				lpfc_cleanup_discovery_resources(vports[i]); | 
 | 		lpfc_destroy_vport_work_array(phba, vports); | 
 | 	} | 
 | 	return 0; | 
 | } | 
 |  | 
 | /************************************************************************/ | 
 | /*                                                                      */ | 
 | /*    lpfc_hba_down_post                                                */ | 
 | /*    This routine will do uninitialization after the HBA is reset      */ | 
 | /*    when bringing down the SLI Layer.                                 */ | 
 | /*    This routine returns 0 on success. Any other return value         */ | 
 | /*    indicates an error.                                               */ | 
 | /*                                                                      */ | 
 | /************************************************************************/ | 
 | int | 
 | lpfc_hba_down_post(struct lpfc_hba *phba) | 
 | { | 
 | 	struct lpfc_sli *psli = &phba->sli; | 
 | 	struct lpfc_sli_ring *pring; | 
 | 	struct lpfc_dmabuf *mp, *next_mp; | 
 | 	struct lpfc_iocbq *iocb; | 
 | 	IOCB_t *cmd = NULL; | 
 | 	LIST_HEAD(completions); | 
 | 	int i; | 
 |  | 
 | 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) | 
 | 		lpfc_sli_hbqbuf_free_all(phba); | 
 | 	else { | 
 | 		/* Cleanup preposted buffers on the ELS ring */ | 
 | 		pring = &psli->ring[LPFC_ELS_RING]; | 
 | 		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { | 
 | 			list_del(&mp->list); | 
 | 			pring->postbufq_cnt--; | 
 | 			lpfc_mbuf_free(phba, mp->virt, mp->phys); | 
 | 			kfree(mp); | 
 | 		} | 
 | 	} | 
 |  | 
 | 	spin_lock_irq(&phba->hbalock); | 
 | 	for (i = 0; i < psli->num_rings; i++) { | 
 | 		pring = &psli->ring[i]; | 
 |  | 
 | 		/* At this point in time the HBA is either reset or DOA. Either | 
 | 		 * way, nothing should be on txcmplq as it will NEVER complete. | 
 | 		 */ | 
 | 		list_splice_init(&pring->txcmplq, &completions); | 
 | 		pring->txcmplq_cnt = 0; | 
 | 		spin_unlock_irq(&phba->hbalock); | 
 |  | 
 | 		while (!list_empty(&completions)) { | 
 | 			iocb = list_get_first(&completions, struct lpfc_iocbq, | 
 | 				list); | 
 | 			cmd = &iocb->iocb; | 
 | 			list_del_init(&iocb->list); | 
 |  | 
 | 			if (!iocb->iocb_cmpl) | 
 | 				lpfc_sli_release_iocbq(phba, iocb); | 
 | 			else { | 
 | 				cmd->ulpStatus = IOSTAT_LOCAL_REJECT; | 
 | 				cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; | 
 | 				(iocb->iocb_cmpl) (phba, iocb, iocb); | 
 | 			} | 
 | 		} | 
 |  | 
 | 		lpfc_sli_abort_iocb_ring(phba, pring); | 
 | 		spin_lock_irq(&phba->hbalock); | 
 | 	} | 
 | 	spin_unlock_irq(&phba->hbalock); | 
 |  | 
 | 	return 0; | 
 | } | 
 |  | 
 | /* HBA heart beat timeout handler */ | 
 | static void | 
 | lpfc_hb_timeout(unsigned long ptr) | 
 | { | 
 | 	struct lpfc_hba *phba; | 
 | 	unsigned long iflag; | 
 |  | 
 | 	phba = (struct lpfc_hba *)ptr; | 
 | 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag); | 
 | 	if (!(phba->pport->work_port_events & WORKER_HB_TMO)) | 
 | 		phba->pport->work_port_events |= WORKER_HB_TMO; | 
 | 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); | 
 |  | 
 | 	spin_lock_irqsave(&phba->hbalock, iflag); | 
 | 	if (phba->work_wait) | 
 | 		wake_up(phba->work_wait); | 
 | 	spin_unlock_irqrestore(&phba->hbalock, iflag); | 
 | 	return; | 
 | } | 
 |  | 
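 | /* Completion handler for the HBA heartbeat mailbox command.  Clears the | 
 |  * hb_outstanding flag, frees the mailbox, and rearms the heartbeat timer | 
 |  * unless the port is offline, in error, or unloading. | 
 |  */ | 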
 | static void | 
 | lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) | 
 | { | 
 | 	unsigned long drvr_flag; | 
 |  | 
 | 	spin_lock_irqsave(&phba->hbalock, drvr_flag); | 
 | 	phba->hb_outstanding = 0; | 
 | 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag); | 
 |  | 
 | 	mempool_free(pmboxq, phba->mbox_mem_pool); | 
 | 	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && | 
 | 		!(phba->link_state == LPFC_HBA_ERROR) && | 
 | 		!(phba->pport->load_flag & FC_UNLOADING)) | 
 | 		mod_timer(&phba->hb_tmofunc, | 
 | 			jiffies + HZ * LPFC_HB_MBOX_INTERVAL); | 
 | 	return; | 
 | } | 
 |  | 
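 | /* HBA heartbeat timeout handler, run from the worker thread.  Frees | 
 |  * preposted ELS buffers that have gone unused for a full heartbeat | 
 |  * interval, then either issues a new heartbeat mailbox command or, if | 
 |  * the previous heartbeat never completed, takes the port offline. | 
 |  */ | 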
 | void | 
 | lpfc_hb_timeout_handler(struct lpfc_hba *phba) | 
 | { | 
 | 	LPFC_MBOXQ_t *pmboxq; | 
 | 	struct lpfc_dmabuf *buf_ptr; | 
 | 	int retval; | 
 | 	struct lpfc_sli *psli = &phba->sli; | 
 | 	LIST_HEAD(completions); | 
 |  | 
 | 	if ((phba->link_state == LPFC_HBA_ERROR) || | 
 | 		(phba->pport->load_flag & FC_UNLOADING) || | 
 | 		(phba->pport->fc_flag & FC_OFFLINE_MODE)) | 
 | 		return; | 
 |  | 
 | 	spin_lock_irq(&phba->pport->work_port_lock); | 
 | 	/* If the timer is already canceled do nothing */ | 
 | 	if (!(phba->pport->work_port_events & WORKER_HB_TMO)) { | 
 | 		spin_unlock_irq(&phba->pport->work_port_lock); | 
 | 		return; | 
 | 	} | 
 |  | 
 | 	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, | 
 | 		jiffies)) { | 
 | 		spin_unlock_irq(&phba->pport->work_port_lock); | 
 | 		if (!phba->hb_outstanding) | 
 | 			mod_timer(&phba->hb_tmofunc, | 
 | 				jiffies + HZ * LPFC_HB_MBOX_INTERVAL); | 
 | 		else | 
 | 			mod_timer(&phba->hb_tmofunc, | 
 | 				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); | 
 | 		return; | 
 | 	} | 
 | 	spin_unlock_irq(&phba->pport->work_port_lock); | 
 |  | 
 | 	if (phba->elsbuf_cnt && | 
 | 		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { | 
 | 		spin_lock_irq(&phba->hbalock); | 
 | 		list_splice_init(&phba->elsbuf, &completions); | 
 | 		phba->elsbuf_cnt = 0; | 
 | 		phba->elsbuf_prev_cnt = 0; | 
 | 		spin_unlock_irq(&phba->hbalock); | 
 |  | 
 | 		while (!list_empty(&completions)) { | 
 | 			list_remove_head(&completions, buf_ptr, | 
 | 				struct lpfc_dmabuf, list); | 
 | 			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); | 
 | 			kfree(buf_ptr); | 
 | 		} | 
 | 	} | 
 | 	phba->elsbuf_prev_cnt = phba->elsbuf_cnt; | 
 |  | 
 | 	/* If there is no heart beat outstanding, issue a heartbeat command */ | 
 | 	if (phba->cfg_enable_hba_heartbeat) { | 
 | 		if (!phba->hb_outstanding) { | 
 | 			pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); | 
 | 			if (!pmboxq) { | 
 | 				mod_timer(&phba->hb_tmofunc, | 
 | 					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL); | 
 | 				return; | 
 | 			} | 
 |  | 
 | 			lpfc_heart_beat(phba, pmboxq); | 
 | 			pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; | 
 | 			pmboxq->vport = phba->pport; | 
 | 			retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); | 
 |  | 
 | 			if (retval != MBX_BUSY && retval != MBX_SUCCESS) { | 
 | 				mempool_free(pmboxq, phba->mbox_mem_pool); | 
 | 				mod_timer(&phba->hb_tmofunc, | 
 | 					  jiffies + HZ * LPFC_HB_MBOX_INTERVAL); | 
 | 				return; | 
 | 			} | 
 | 			mod_timer(&phba->hb_tmofunc, | 
 | 				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); | 
 | 			phba->hb_outstanding = 1; | 
 | 			return; | 
 | 		} else { | 
 | 			/* | 
 | 			 * If the heartbeat timeout fired while a heartbeat | 
 | 			 * mailbox command is still outstanding, the adapter | 
 | 			 * is unresponsive; take the HBA offline. | 
 | 			 */ | 
 | 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 
 | 					"0459 Adapter heartbeat failure, " | 
 | 					"taking this port offline.\n"); | 
 |  | 
 | 			spin_lock_irq(&phba->hbalock); | 
 | 			psli->sli_flag &= ~LPFC_SLI2_ACTIVE; | 
 | 			spin_unlock_irq(&phba->hbalock); | 
 |  | 
 | 			lpfc_offline_prep(phba); | 
 | 			lpfc_offline(phba); | 
 | 			lpfc_unblock_mgmt_io(phba); | 
 | 			phba->link_state = LPFC_HBA_ERROR; | 
 | 			lpfc_hba_down_post(phba); | 
 | 		} | 
 | 	} | 
 | } | 
 |  | 
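 | /* Take the port offline after an unrecoverable error attention: clear | 
 |  * SLI2 active mode, bring the port offline, reset the adapter, and leave | 
 |  * the link state set to LPFC_HBA_ERROR. | 
 |  */ | 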
 | static void | 
 | lpfc_offline_eratt(struct lpfc_hba *phba) | 
 | { | 
 | 	struct lpfc_sli   *psli = &phba->sli; | 
 |  | 
 | 	spin_lock_irq(&phba->hbalock); | 
 | 	psli->sli_flag &= ~LPFC_SLI2_ACTIVE; | 
 | 	spin_unlock_irq(&phba->hbalock); | 
 | 	lpfc_offline_prep(phba); | 
 |  | 
 | 	lpfc_offline(phba); | 
 | 	lpfc_reset_barrier(phba); | 
 | 	lpfc_sli_brdreset(phba); | 
 | 	lpfc_hba_down_post(phba); | 
 | 	lpfc_sli_brdready(phba, HS_MBRDY); | 
 | 	lpfc_unblock_mgmt_io(phba); | 
 | 	phba->link_state = LPFC_HBA_ERROR; | 
 | 	return; | 
 | } | 
 |  | 
 | /************************************************************************/ | 
 | /*                                                                      */ | 
 | /*    lpfc_handle_eratt                                                 */ | 
 | /*    This routine will handle processing a Host Attention              */ | 
 | /*    Error Status event. This will be initialized                      */ | 
 | /*    as a SLI layer callback routine.                                  */ | 
 | /*                                                                      */ | 
 | /************************************************************************/ | 
 | void | 
 | lpfc_handle_eratt(struct lpfc_hba *phba) | 
 | { | 
 | 	struct lpfc_vport *vport = phba->pport; | 
 | 	struct lpfc_sli   *psli = &phba->sli; | 
 | 	struct lpfc_sli_ring  *pring; | 
 | 	uint32_t event_data; | 
 | 	unsigned long temperature; | 
 | 	struct temp_event temp_event_data; | 
 | 	struct Scsi_Host  *shost; | 
 |  | 
 | 	/* If the pci channel is offline, ignore possible errors, | 
 | 	 * since we cannot communicate with the pci card anyway. */ | 
 | 	if (pci_channel_offline(phba->pcidev)) | 
 | 		return; | 
 | 	/* If resets are disabled then leave the HBA alone and return */ | 
 | 	if (!phba->cfg_enable_hba_reset) | 
 | 		return; | 
 |  | 
 | 	if (phba->work_hs & HS_FFER6) { | 
 | 		/* Re-establishing Link */ | 
 | 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, | 
 | 				"1301 Re-establishing Link " | 
 | 				"Data: x%x x%x x%x\n", | 
 | 				phba->work_hs, | 
 | 				phba->work_status[0], phba->work_status[1]); | 
 |  | 
 | 		spin_lock_irq(&phba->hbalock); | 
 | 		psli->sli_flag &= ~LPFC_SLI2_ACTIVE; | 
 | 		spin_unlock_irq(&phba->hbalock); | 
 |  | 
 | 		/* | 
 | 		 * The firmware stops when it triggers an error attention | 
 | 		 * with HS_FFER6, which may cause outstanding I/Os to be | 
 | 		 * dropped.  Fail the iocbs on the txcmplq so the SCSI | 
 | 		 * layer can retry them after the link is re-established. | 
 | 		 */ | 
 | 		pring = &psli->ring[psli->fcp_ring]; | 
 | 		lpfc_sli_abort_iocb_ring(phba, pring); | 
 |  | 
 | 		/* | 
 | 		 * There was a firmware error.  Take the hba offline and then | 
 | 		 * attempt to restart it. | 
 | 		 */ | 
 | 		lpfc_offline_prep(phba); | 
 | 		lpfc_offline(phba); | 
 | 		lpfc_sli_brdrestart(phba); | 
 | 		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */ | 
 | 			lpfc_unblock_mgmt_io(phba); | 
 | 			return; | 
 | 		} | 
 | 		lpfc_unblock_mgmt_io(phba); | 
 | 	} else if (phba->work_hs & HS_CRIT_TEMP) { | 
 | 		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); | 
 | 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; | 
 | 		temp_event_data.event_code = LPFC_CRIT_TEMP; | 
 | 		temp_event_data.data = (uint32_t)temperature; | 
 |  | 
 | 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 
 | 				"0459 Adapter maximum temperature exceeded " | 
 | 				"(%ld), taking this port offline " | 
 | 				"Data: x%x x%x x%x\n", | 
 | 				temperature, phba->work_hs, | 
 | 				phba->work_status[0], phba->work_status[1]); | 
 |  | 
 | 		shost = lpfc_shost_from_vport(phba->pport); | 
 | 		fc_host_post_vendor_event(shost, fc_get_event_number(), | 
 | 					  sizeof(temp_event_data), | 
 | 					  (char *) &temp_event_data, | 
 | 					  SCSI_NL_VID_TYPE_PCI | 
 | 					  | PCI_VENDOR_ID_EMULEX); | 
 |  | 
 | 		spin_lock_irq(&phba->hbalock); | 
 | 		phba->over_temp_state = HBA_OVER_TEMP; | 
 | 		spin_unlock_irq(&phba->hbalock); | 
 | 		lpfc_offline_eratt(phba); | 
 |  | 
 | 	} else { | 
 | 		/* The if clauses above force this code path when the status | 
 | 		 * failure is neither FFER6 nor a critical temperature event. | 
 | 		 * Do not call the offline path twice.  This is the adapter | 
 | 		 * hardware error path. | 
 | 		 */ | 
 | 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 
 | 				"0457 Adapter Hardware Error " | 
 | 				"Data: x%x x%x x%x\n", | 
 | 				phba->work_hs, | 
 | 				phba->work_status[0], phba->work_status[1]); | 
 |  | 
 | 		event_data = FC_REG_DUMP_EVENT; | 
 | 		shost = lpfc_shost_from_vport(vport); | 
 | 		fc_host_post_vendor_event(shost, fc_get_event_number(), | 
 | 				sizeof(event_data), (char *) &event_data, | 
 | 				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | 
 |  | 
 | 		lpfc_offline_eratt(phba); | 
 | 	} | 
 | } | 
 |  | 
 | /************************************************************************/ | 
 | /*                                                                      */ | 
 | /*    lpfc_handle_latt                                                  */ | 
 | /*    This routine will handle processing a Host Attention              */ | 
 | /*    Link Status event. This will be initialized                       */ | 
 | /*    as a SLI layer callback routine.                                  */ | 
 | /*                                                                      */ | 
 | /************************************************************************/ | 
 | void | 
 | lpfc_handle_latt(struct lpfc_hba *phba) | 
 | { | 
 | 	struct lpfc_vport *vport = phba->pport; | 
 | 	struct lpfc_sli   *psli = &phba->sli; | 
 | 	LPFC_MBOXQ_t *pmb; | 
 | 	volatile uint32_t control; | 
 | 	struct lpfc_dmabuf *mp; | 
 | 	int rc = 0; | 
 |  | 
 | 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 
 | 	if (!pmb) { | 
 | 		rc = 1; | 
 | 		goto lpfc_handle_latt_err_exit; | 
 | 	} | 
 |  | 
 | 	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | 
 | 	if (!mp) { | 
 | 		rc = 2; | 
 | 		goto lpfc_handle_latt_free_pmb; | 
 | 	} | 
 |  | 
 | 	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); | 
 | 	if (!mp->virt) { | 
 | 		rc = 3; | 
 | 		goto lpfc_handle_latt_free_mp; | 
 | 	} | 
 |  | 
 | 	/* Cleanup any outstanding ELS commands */ | 
 | 	lpfc_els_flush_all_cmd(phba); | 
 |  | 
 | 	psli->slistat.link_event++; | 
 | 	lpfc_read_la(phba, pmb, mp); | 
 | 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; | 
 | 	pmb->vport = vport; | 
 | 	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); | 
 | 	if (rc == MBX_NOT_FINISHED) { | 
 | 		rc = 4; | 
 | 		goto lpfc_handle_latt_free_mbuf; | 
 | 	} | 
 |  | 
 | 	/* Clear Link Attention in HA REG */ | 
 | 	spin_lock_irq(&phba->hbalock); | 
 | 	writel(HA_LATT, phba->HAregaddr); | 
 | 	readl(phba->HAregaddr); /* flush */ | 
 | 	spin_unlock_irq(&phba->hbalock); | 
 |  | 
 | 	return; | 
 |  | 
 | lpfc_handle_latt_free_mbuf: | 
 | 	lpfc_mbuf_free(phba, mp->virt, mp->phys); | 
 | lpfc_handle_latt_free_mp: | 
 | 	kfree(mp); | 
 | lpfc_handle_latt_free_pmb: | 
 | 	mempool_free(pmb, phba->mbox_mem_pool); | 
 | lpfc_handle_latt_err_exit: | 
 | 	/* Enable Link attention interrupts */ | 
 | 	spin_lock_irq(&phba->hbalock); | 
 | 	psli->sli_flag |= LPFC_PROCESS_LA; | 
 | 	control = readl(phba->HCregaddr); | 
 | 	control |= HC_LAINT_ENA; | 
 | 	writel(control, phba->HCregaddr); | 
 | 	readl(phba->HCregaddr); /* flush */ | 
 |  | 
 | 	/* Clear Link Attention in HA REG */ | 
 | 	writel(HA_LATT, phba->HAregaddr); | 
 | 	readl(phba->HAregaddr); /* flush */ | 
 | 	spin_unlock_irq(&phba->hbalock); | 
 | 	lpfc_linkdown(phba); | 
 | 	phba->link_state = LPFC_HBA_ERROR; | 
 |  | 
 | 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, | 
 | 		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); | 
 |  | 
 | 	return; | 
 | } | 
 |  | 
 | /************************************************************************/ | 
 | /*                                                                      */ | 
 | /*   lpfc_parse_vpd                                                     */ | 
 | /*   This routine will parse the VPD data                               */ | 
 | /*                                                                      */ | 
 | /************************************************************************/ | 
 | static int | 
 | lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) | 
 | { | 
 | 	uint8_t lenlo, lenhi; | 
 | 	int Length; | 
 | 	int i, j; | 
 | 	int finished = 0; | 
 | 	int index = 0; | 
 |  | 
 | 	if (!vpd) | 
 | 		return 0; | 
 |  | 
 | 	/* Vital Product */ | 
 | 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 
 | 			"0455 Vital Product Data: x%x x%x x%x x%x\n", | 
 | 			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], | 
 | 			(uint32_t) vpd[3]); | 
 | 	while (!finished && (index < (len - 4))) { | 
 | 		switch (vpd[index]) { | 
 | 		case 0x82: | 
 | 		case 0x91: | 
 | 			index += 1; | 
 | 			lenlo = vpd[index]; | 
 | 			index += 1; | 
 | 			lenhi = vpd[index]; | 
 | 			index += 1; | 
 | 			i = ((((unsigned short)lenhi) << 8) + lenlo); | 
 | 			index += i; | 
 | 			break; | 
 | 		case 0x90: | 
 | 			index += 1; | 
 | 			lenlo = vpd[index]; | 
 | 			index += 1; | 
 | 			lenhi = vpd[index]; | 
 | 			index += 1; | 
 | 			Length = ((((unsigned short)lenhi) << 8) + lenlo); | 
 | 			if (Length > len - index) | 
 | 				Length = len - index; | 
 | 			while (Length > 0) { | 
 | 			/* Look for Serial Number */ | 
 | 			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { | 
 | 				index += 2; | 
 | 				i = vpd[index]; | 
 | 				index += 1; | 
 | 				j = 0; | 
 | 				Length -= (3+i); | 
 | 				while(i--) { | 
 | 					phba->SerialNumber[j++] = vpd[index++]; | 
 | 					if (j == 31) | 
 | 						break; | 
 | 				} | 
 | 				phba->SerialNumber[j] = 0; | 
 | 				continue; | 
 | 			} | 
 | 			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { | 
 | 				phba->vpd_flag |= VPD_MODEL_DESC; | 
 | 				index += 2; | 
 | 				i = vpd[index]; | 
 | 				index += 1; | 
 | 				j = 0; | 
 | 				Length -= (3+i); | 
 | 				while(i--) { | 
 | 					phba->ModelDesc[j++] = vpd[index++]; | 
 | 					if (j == 255) | 
 | 						break; | 
 | 				} | 
 | 				phba->ModelDesc[j] = 0; | 
 | 				continue; | 
 | 			} | 
 | 			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { | 
 | 				phba->vpd_flag |= VPD_MODEL_NAME; | 
 | 				index += 2; | 
 | 				i = vpd[index]; | 
 | 				index += 1; | 
 | 				j = 0; | 
 | 				Length -= (3+i); | 
 | 				while(i--) { | 
 | 					phba->ModelName[j++] = vpd[index++]; | 
 | 					if (j == 79) | 
 | 						break; | 
 | 				} | 
 | 				phba->ModelName[j] = 0; | 
 | 				continue; | 
 | 			} | 
 | 			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { | 
 | 				phba->vpd_flag |= VPD_PROGRAM_TYPE; | 
 | 				index += 2; | 
 | 				i = vpd[index]; | 
 | 				index += 1; | 
 | 				j = 0; | 
 | 				Length -= (3+i); | 
 | 				while(i--) { | 
 | 					phba->ProgramType[j++] = vpd[index++]; | 
 | 					if (j == 255) | 
 | 						break; | 
 | 				} | 
 | 				phba->ProgramType[j] = 0; | 
 | 				continue; | 
 | 			} | 
 | 			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { | 
 | 				phba->vpd_flag |= VPD_PORT; | 
 | 				index += 2; | 
 | 				i = vpd[index]; | 
 | 				index += 1; | 
 | 				j = 0; | 
 | 				Length -= (3+i); | 
 | 				while (i--) { | 
 | 					phba->Port[j++] = vpd[index++]; | 
 | 					if (j == 19) | 
 | 						break; | 
 | 				} | 
 | 				phba->Port[j] = 0; | 
 | 				continue; | 
 | 			} | 
 | 			else { | 
 | 				index += 2; | 
 | 				i = vpd[index]; | 
 | 				index += 1; | 
 | 				index += i; | 
 | 				Length -= (3 + i); | 
 | 			} | 
 | 		} | 
 | 		finished = 0; | 
 | 		break; | 
 | 		case 0x78: | 
 | 			finished = 1; | 
 | 			break; | 
 | 		default: | 
 | 			index ++; | 
 | 			break; | 
 | 		} | 
 | 	} | 
 |  | 
 | 	return(1); | 
 | } | 
 |  | 
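 | /* Fill in default model name and description strings for the adapter, | 
 |  * based on the PCI device ID and the highest link speed the adapter | 
 |  * supports, unless they were already set from the VPD data. | 
 |  */ | 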
 | static void | 
 | lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) | 
 | { | 
 | 	lpfc_vpd_t *vp; | 
 | 	uint16_t dev_id = phba->pcidev->device; | 
 | 	int max_speed; | 
 | 	struct { | 
 | 		char * name; | 
 | 		int    max_speed; | 
 | 		char * bus; | 
 | 	} m = {"<Unknown>", 0, ""}; | 
 |  | 
 | 	if (mdp && mdp[0] != '\0' | 
 | 		&& descp && descp[0] != '\0') | 
 | 		return; | 
 |  | 
 | 	if (phba->lmt & LMT_10Gb) | 
 | 		max_speed = 10; | 
 | 	else if (phba->lmt & LMT_8Gb) | 
 | 		max_speed = 8; | 
 | 	else if (phba->lmt & LMT_4Gb) | 
 | 		max_speed = 4; | 
 | 	else if (phba->lmt & LMT_2Gb) | 
 | 		max_speed = 2; | 
 | 	else | 
 | 		max_speed = 1; | 
 |  | 
 | 	vp = &phba->vpd; | 
 |  | 
 | 	switch (dev_id) { | 
 | 	case PCI_DEVICE_ID_FIREFLY: | 
 | 		m = (typeof(m)){"LP6000", max_speed, "PCI"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_SUPERFLY: | 
 | 		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) | 
 | 			m = (typeof(m)){"LP7000", max_speed,  "PCI"}; | 
 | 		else | 
 | 			m = (typeof(m)){"LP7000E", max_speed, "PCI"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_DRAGONFLY: | 
 | 		m = (typeof(m)){"LP8000", max_speed, "PCI"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_CENTAUR: | 
 | 		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) | 
 | 			m = (typeof(m)){"LP9002", max_speed, "PCI"}; | 
 | 		else | 
 | 			m = (typeof(m)){"LP9000", max_speed, "PCI"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_RFLY: | 
 | 		m = (typeof(m)){"LP952", max_speed, "PCI"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_PEGASUS: | 
 | 		m = (typeof(m)){"LP9802", max_speed, "PCI-X"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_THOR: | 
 | 		m = (typeof(m)){"LP10000", max_speed, "PCI-X"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_VIPER: | 
 | 		m = (typeof(m)){"LPX1000", max_speed,  "PCI-X"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_PFLY: | 
 | 		m = (typeof(m)){"LP982", max_speed, "PCI-X"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_TFLY: | 
 | 		m = (typeof(m)){"LP1050", max_speed, "PCI-X"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_HELIOS: | 
 | 		m = (typeof(m)){"LP11000", max_speed, "PCI-X2"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_HELIOS_SCSP: | 
 | 		m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_HELIOS_DCSP: | 
 | 		m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_NEPTUNE: | 
 | 		m = (typeof(m)){"LPe1000", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_NEPTUNE_SCSP: | 
 | 		m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_NEPTUNE_DCSP: | 
 | 		m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_BMID: | 
 | 		m = (typeof(m)){"LP1150", max_speed, "PCI-X2"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_BSMB: | 
 | 		m = (typeof(m)){"LP111", max_speed, "PCI-X2"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_ZEPHYR: | 
 | 		m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_ZEPHYR_SCSP: | 
 | 		m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_ZEPHYR_DCSP: | 
 | 		m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_ZMID: | 
 | 		m = (typeof(m)){"LPe1150", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_ZSMB: | 
 | 		m = (typeof(m)){"LPe111", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_LP101: | 
 | 		m = (typeof(m)){"LP101", max_speed, "PCI-X"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_LP10000S: | 
 | 		m = (typeof(m)){"LP10000-S", max_speed, "PCI"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_LP11000S: | 
 | 		m = (typeof(m)){"LP11000-S", max_speed, | 
 | 			"PCI-X2"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_LPE11000S: | 
 | 		m = (typeof(m)){"LPe11000-S", max_speed, | 
 | 			"PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_SAT: | 
 | 		m = (typeof(m)){"LPe12000", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_SAT_MID: | 
 | 		m = (typeof(m)){"LPe1250", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_SAT_SMB: | 
 | 		m = (typeof(m)){"LPe121", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_SAT_DCSP: | 
 | 		m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_SAT_SCSP: | 
 | 		m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	case PCI_DEVICE_ID_SAT_S: | 
 | 		m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; | 
 | 		break; | 
 | 	default: | 
 | 		/* Keep the "<Unknown>" defaults from the initializer so the | 
 | 		 * snprintf calls below never see a NULL string. | 
 | 		 */ | 
 | 		break; | 
 | 	} | 
 |  | 
 | 	if (mdp && mdp[0] == '\0') | 
 | 		snprintf(mdp, 79,"%s", m.name); | 
 | 	if (descp && descp[0] == '\0') | 
 | 		snprintf(descp, 255, | 
 | 			 "Emulex %s %dGb %s Fibre Channel Adapter", | 
 | 			 m.name, m.max_speed, m.bus); | 
 | } | 
 |  | 
 | /**************************************************/ | 
 | /*   lpfc_post_buffer                             */ | 
 | /*                                                */ | 
 | /*   This routine will post cnt buffers to the    */ | 
 | /*   ring with the QUE_RING_BUF64_CN command.     */ | 
 | /*   Up to 2 buffers are posted per command.      */ | 
 | /*   Returns the number of buffers NOT posted.    */ | 
 | /**************************************************/ | 
 | int | 
 | lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt, | 
 | 		 int type) | 
 | { | 
 | 	IOCB_t *icmd; | 
 | 	struct lpfc_iocbq *iocb; | 
 | 	struct lpfc_dmabuf *mp1, *mp2; | 
 |  | 
 | 	cnt += pring->missbufcnt; | 
 |  | 
 | 	/* While there are buffers to post */ | 
 | 	while (cnt > 0) { | 
 | 		/* Allocate buffer for  command iocb */ | 
 | 		iocb = lpfc_sli_get_iocbq(phba); | 
 | 		if (iocb == NULL) { | 
 | 			pring->missbufcnt = cnt; | 
 | 			return cnt; | 
 | 		} | 
 | 		icmd = &iocb->iocb; | 
 |  | 
 | 		/* 2 buffers can be posted per command */ | 
 | 		/* Allocate buffer to post */ | 
 | 		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); | 
 | 		if (mp1) | 
 | 		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); | 
 | 		if (!mp1 || !mp1->virt) { | 
 | 			kfree(mp1); | 
 | 			lpfc_sli_release_iocbq(phba, iocb); | 
 | 			pring->missbufcnt = cnt; | 
 | 			return cnt; | 
 | 		} | 
 |  | 
 | 		INIT_LIST_HEAD(&mp1->list); | 
 | 		/* Allocate buffer to post */ | 
 | 		if (cnt > 1) { | 
 | 			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); | 
 | 			if (mp2) | 
 | 				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, | 
 | 							    &mp2->phys); | 
 | 			if (!mp2 || !mp2->virt) { | 
 | 				kfree(mp2); | 
 | 				lpfc_mbuf_free(phba, mp1->virt, mp1->phys); | 
 | 				kfree(mp1); | 
 | 				lpfc_sli_release_iocbq(phba, iocb); | 
 | 				pring->missbufcnt = cnt; | 
 | 				return cnt; | 
 | 			} | 
 |  | 
 | 			INIT_LIST_HEAD(&mp2->list); | 
 | 		} else { | 
 | 			mp2 = NULL; | 
 | 		} | 
 |  | 
 | 		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); | 
 | 		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); | 
 | 		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; | 
 | 		icmd->ulpBdeCount = 1; | 
 | 		cnt--; | 
 | 		if (mp2) { | 
 | 			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); | 
 | 			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); | 
 | 			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; | 
 | 			cnt--; | 
 | 			icmd->ulpBdeCount = 2; | 
 | 		} | 
 |  | 
 | 		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; | 
 | 		icmd->ulpLe = 1; | 
 |  | 
 | 		if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { | 
 | 			lpfc_mbuf_free(phba, mp1->virt, mp1->phys); | 
 | 			kfree(mp1); | 
 | 			cnt++; | 
 | 			if (mp2) { | 
 | 				lpfc_mbuf_free(phba, mp2->virt, mp2->phys); | 
 | 				kfree(mp2); | 
 | 				cnt++; | 
 | 			} | 
 | 			lpfc_sli_release_iocbq(phba, iocb); | 
 | 			pring->missbufcnt = cnt; | 
 | 			return cnt; | 
 | 		} | 
 | 		lpfc_sli_ringpostbuf_put(phba, pring, mp1); | 
 | 		if (mp2) | 
 | 			lpfc_sli_ringpostbuf_put(phba, pring, mp2); | 
 | 	} | 
 | 	pring->missbufcnt = 0; | 
 | 	return 0; | 
 | } | 
 |  | 
 | /************************************************************************/ | 
 | /*                                                                      */ | 
 | /*   lpfc_post_rcv_buf                                                  */ | 
 | /*   This routine post initial rcv buffers to the configured rings      */ | 
 | /*                                                                      */ | 
 | /************************************************************************/ | 
 | static int | 
 | lpfc_post_rcv_buf(struct lpfc_hba *phba) | 
 | { | 
 | 	struct lpfc_sli *psli = &phba->sli; | 
 |  | 
 | 	/* Ring 0, ELS / CT buffers */ | 
 | 	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1); | 
 | 	/* Ring 2 - FCP no buffers needed */ | 
 |  | 
 | 	return 0; | 
 | } | 
 |  | 
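 | /* S(N, V): rotate the 32-bit value V left by N bits; used by the SHA-1 | 
 |  * style hash routines below. | 
 |  */ | 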
 | #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) | 
 |  | 
 | /************************************************************************/ | 
 | /*                                                                      */ | 
 | /*   lpfc_sha_init                                                      */ | 
 | /*                                                                      */ | 
 | /************************************************************************/ | 
 | static void | 
 | lpfc_sha_init(uint32_t * HashResultPointer) | 
 | { | 
 | 	HashResultPointer[0] = 0x67452301; | 
 | 	HashResultPointer[1] = 0xEFCDAB89; | 
 | 	HashResultPointer[2] = 0x98BADCFE; | 
 | 	HashResultPointer[3] = 0x10325476; | 
 | 	HashResultPointer[4] = 0xC3D2E1F0; | 
 | } | 
 |  | 
 | /************************************************************************/ | 
 | /*                                                                      */ | 
 | /*   lpfc_sha_iterate                                                   */ | 
 | /*                                                                      */ | 
 | /************************************************************************/ | 
 | static void | 
 | lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) | 
 | { | 
 | 	int t; | 
 | 	uint32_t TEMP; | 
 | 	uint32_t A, B, C, D, E; | 
 | 	t = 16; | 
 | 	do { | 
 | 		HashWorkingPointer[t] = | 
 | 		    S(1, | 
 | 		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - | 
 | 								     8] ^ | 
 | 		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); | 
 | 	} while (++t <= 79); | 
 | 	t = 0; | 
 | 	A = HashResultPointer[0]; | 
 | 	B = HashResultPointer[1]; | 
 | 	C = HashResultPointer[2]; | 
 | 	D = HashResultPointer[3]; | 
 | 	E = HashResultPointer[4]; | 
 |  | 
 | 	do { | 
 | 		if (t < 20) { | 
 | 			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; | 
 | 		} else if (t < 40) { | 
 | 			TEMP = (B ^ C ^ D) + 0x6ED9EBA1; | 
 | 		} else if (t < 60) { | 
 | 			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; | 
 | 		} else { | 
 | 			TEMP = (B ^ C ^ D) + 0xCA62C1D6; | 
 | 		} | 
 | 		TEMP += S(5, A) + E + HashWorkingPointer[t]; | 
 | 		E = D; | 
 | 		D = C; | 
 | 		C = S(30, B); | 
 | 		B = A; | 
 | 		A = TEMP; | 
 | 	} while (++t <= 79); | 
 |  | 
 | 	HashResultPointer[0] += A; | 
 | 	HashResultPointer[1] += B; | 
 | 	HashResultPointer[2] += C; | 
 | 	HashResultPointer[3] += D; | 
 | 	HashResultPointer[4] += E; | 
 |  | 
 | } | 
 |  | 
 | /************************************************************************/ | 
 | /*                                                                      */ | 
 | /*   lpfc_challenge_key                                                 */ | 
 | /*                                                                      */ | 
 | /************************************************************************/ | 
 | static void | 
 | lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) | 
 | { | 
 | 	*HashWorking = (*RandomChallenge ^ *HashWorking); | 
 | } | 
 |  | 
 | /************************************************************************/ | 
 | /*                                                                      */ | 
 | /*   lpfc_hba_init                                                      */ | 
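 | /*   This routine derives the adapter hbainit key: it seeds an 80 word  */ | 
 | /*   working array with the adapter WWNN, mixes in the RandomData       */ | 
 | /*   challenge read from the adapter, and runs the SHA-1 style hash     */ | 
 | /*   defined above over the result.                                     */ | 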
 | /*                                                                      */ | 
 | /************************************************************************/ | 
 | void | 
 | lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) | 
 | { | 
 | 	int t; | 
 | 	uint32_t *HashWorking; | 
 | 	uint32_t *pwwnn = (uint32_t *) phba->wwnn; | 
 |  | 
 | 	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); | 
 | 	if (!HashWorking) | 
 | 		return; | 
 |  | 
 | 	HashWorking[0] = HashWorking[78] = *pwwnn++; | 
 | 	HashWorking[1] = HashWorking[79] = *pwwnn; | 
 |  | 
 | 	for (t = 0; t < 7; t++) | 
 | 		lpfc_challenge_key(phba->RandomData + t, HashWorking + t); | 
 |  | 
 | 	lpfc_sha_init(hbainit); | 
 | 	lpfc_sha_iterate(hbainit, HashWorking); | 
 | 	kfree(HashWorking); | 
 | } | 
 |  | 
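 | /* Release every remote node (ndlp) on the vport, running active nodes | 
 |  * through the discovery state machine for removal, then wait up to 30 | 
 |  * seconds for the vport node list to drain. | 
 |  */ | 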
 | void | 
 | lpfc_cleanup(struct lpfc_vport *vport) | 
 | { | 
 | 	struct lpfc_hba   *phba = vport->phba; | 
 | 	struct lpfc_nodelist *ndlp, *next_ndlp; | 
 | 	int i = 0; | 
 |  | 
 | 	if (phba->link_state > LPFC_LINK_DOWN) | 
 | 		lpfc_port_link_failure(vport); | 
 |  | 
 | 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { | 
 | 		if (!NLP_CHK_NODE_ACT(ndlp)) { | 
 | 			ndlp = lpfc_enable_node(vport, ndlp, | 
 | 						NLP_STE_UNUSED_NODE); | 
 | 			if (!ndlp) | 
 | 				continue; | 
 | 			spin_lock_irq(&phba->ndlp_lock); | 
 | 			NLP_SET_FREE_REQ(ndlp); | 
 | 			spin_unlock_irq(&phba->ndlp_lock); | 
 | 			/* Trigger the release of the ndlp memory */ | 
 | 			lpfc_nlp_put(ndlp); | 
 | 			continue; | 
 | 		} | 
 | 		spin_lock_irq(&phba->ndlp_lock); | 
 | 		if (NLP_CHK_FREE_REQ(ndlp)) { | 
 | 			/* The ndlp should not be in memory free mode already */ | 
 | 			spin_unlock_irq(&phba->ndlp_lock); | 
 | 			continue; | 
 | 		} else | 
 | 			/* Indicate request for freeing ndlp memory */ | 
 | 			NLP_SET_FREE_REQ(ndlp); | 
 | 		spin_unlock_irq(&phba->ndlp_lock); | 
 |  | 
 | 		if (vport->port_type != LPFC_PHYSICAL_PORT && | 
 | 		    ndlp->nlp_DID == Fabric_DID) { | 
 | 			/* Just free up ndlp with Fabric_DID for vports */ | 
 | 			lpfc_nlp_put(ndlp); | 
 | 			continue; | 
 | 		} | 
 |  | 
 | 		if (ndlp->nlp_type & NLP_FABRIC) | 
 | 			lpfc_disc_state_machine(vport, ndlp, NULL, | 
 | 					NLP_EVT_DEVICE_RECOVERY); | 
 |  | 
 | 		lpfc_disc_state_machine(vport, ndlp, NULL, | 
 | 					     NLP_EVT_DEVICE_RM); | 
 | 	} | 
 |  | 
 | 	/* At this point, ALL ndlps should be gone | 
 | 	 * because of the previous NLP_EVT_DEVICE_RM. | 
 | 	 * Let's wait for this to happen, if needed. | 
 | 	 */ | 
 | 	while (!list_empty(&vport->fc_nodes)) { | 
 |  | 
 | 		if (i++ > 3000) { | 
 | 			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | 
 | 				"0233 Nodelist not empty\n"); | 
 | 			list_for_each_entry_safe(ndlp, next_ndlp, | 
 | 						&vport->fc_nodes, nlp_listp) { | 
 | 				lpfc_printf_vlog(ndlp->vport, KERN_ERR, | 
 | 						LOG_NODE, | 
 | 						"0282: did:x%x ndlp:x%p " | 
 | 						"usgmap:x%x refcnt:%d\n", | 
 | 						ndlp->nlp_DID, (void *)ndlp, | 
 | 						ndlp->nlp_usg_map, | 
 | 						atomic_read( | 
 | 							&ndlp->kref.refcount)); | 
 | 			} | 
 | 			break; | 
 | 		} | 
 |  | 
 | 		/* Wait for any activity on ndlps to settle */ | 
 | 		msleep(10); | 
 | 	} | 
 | 	return; | 
 | } | 
 |  | 
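 | /* Stop the ELS, FDMI and discovery timers associated with a vport. */ | 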
 | void | 
 | lpfc_stop_vport_timers(struct lpfc_vport *vport) | 
 | { | 
 | 	del_timer_sync(&vport->els_tmofunc); | 
 | 	del_timer_sync(&vport->fc_fdmitmo); | 
 | 	lpfc_can_disctmo(vport); | 
 | 	return; | 
 | } | 
 |  | 
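 | /* Stop every timer associated with the HBA: the FCP polling timer, the | 
 |  * physical port's vport timers, and the mailbox, fabric block and | 
 |  * heartbeat timers. | 
 |  */ | 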
 | static void | 
 | lpfc_stop_phba_timers(struct lpfc_hba *phba) | 
 | { | 
 | 	del_timer_sync(&phba->fcp_poll_timer); | 
 | 	lpfc_stop_vport_timers(phba->pport); | 
 | 	del_timer_sync(&phba->sli.mbox_tmo); | 
 | 	del_timer_sync(&phba->fabric_block_timer); | 
 | 	phba->hb_outstanding = 0; | 
 | 	del_timer_sync(&phba->hb_tmofunc); | 
 | 	return; | 
 | } | 
 |  | 
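 | /* Block further management commands to the adapter by setting the | 
 |  * LPFC_BLOCK_MGMT_IO flag under the hba lock. | 
 |  */ | 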
 | static void | 
 | lpfc_block_mgmt_io(struct lpfc_hba * phba) | 
 | { | 
 | 	unsigned long iflag; | 
 |  | 
 | 	spin_lock_irqsave(&phba->hbalock, iflag); | 
 | 	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; | 
 | 	spin_unlock_irqrestore(&phba->hbalock, iflag); | 
 | } | 
 |  | 
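 | /* Bring the HBA online from the offline state: set up the SLI queues, | 
 |  * initialize the HBA, and clear FC_OFFLINE_MODE on every vport. | 
 |  * Returns 0 on success and 1 on failure. | 
 |  */ | 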
 | int | 
 | lpfc_online(struct lpfc_hba *phba) | 
 | { | 
 | 	struct lpfc_vport *vport = phba->pport; | 
 | 	struct lpfc_vport **vports; | 
 | 	int i; | 
 |  | 
 | 	if (!phba) | 
 | 		return 0; | 
 |  | 
 | 	if (!(vport->fc_flag & FC_OFFLINE_MODE)) | 
 | 		return 0; | 
 |  | 
 | 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | 
 | 			"0458 Bring Adapter online\n"); | 
 |  | 
 | 	lpfc_block_mgmt_io(phba); | 
 |  | 
 | 	if (!lpfc_sli_queue_setup(phba)) { | 
 | 		lpfc_unblock_mgmt_io(phba); | 
 | 		return 1; | 
 | 	} | 
 |  | 
 | 	if (lpfc_sli_hba_setup(phba)) {	/* Initialize the HBA */ | 
 | 		lpfc_unblock_mgmt_io(phba); | 
 | 		return 1; | 
 | 	} | 
 |  | 
 | 	vports = lpfc_create_vport_work_array(phba); | 
 | 	if (vports != NULL) | 
 | 		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | 
 | 			struct Scsi_Host *shost; | 
 | 			shost = lpfc_shost_from_vport(vports[i]); | 
 | 			spin_lock_irq(shost->host_lock); | 
 | 			vports[i]->fc_flag &= ~FC_OFFLINE_MODE; | 
 | 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) | 
 | 				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | 
 | 			spin_unlock_irq(shost->host_lock); | 
 | 		} | 
 | 	lpfc_destroy_vport_work_array(phba, vports); | 
 |  | 
 | 	lpfc_unblock_mgmt_io(phba); | 
 | 	return 0; | 
 | } | 
 |  | 
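 | /* Allow management commands to the adapter again by clearing the | 
 |  * LPFC_BLOCK_MGMT_IO flag. | 
 |  */ | 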
 | void | 
 | lpfc_unblock_mgmt_io(struct lpfc_hba * phba) | 
 | { | 
 | 	unsigned long iflag; | 
 |  | 
 | 	spin_lock_irqsave(&phba->hbalock, iflag); | 
 | 	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; | 
 | 	spin_unlock_irqrestore(&phba->hbalock, iflag); | 
 | } | 
 |  | 
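 | /* Prepare the HBA to go offline: block management I/O, bring the link | 
 |  * down, and unregister the login (RPI) of every active node on every | 
 |  * vport before the mailbox queue is flushed. | 
 |  */ | 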
 | void | 
 | lpfc_offline_prep(struct lpfc_hba * phba) | 
 | { | 
 | 	struct lpfc_vport *vport = phba->pport; | 
 | 	struct lpfc_nodelist  *ndlp, *next_ndlp; | 
 | 	struct lpfc_vport **vports; | 
 | 	int i; | 
 |  | 
 | 	if (vport->fc_flag & FC_OFFLINE_MODE) | 
 | 		return; | 
 |  | 
 | 	lpfc_block_mgmt_io(phba); | 
 |  | 
 | 	lpfc_linkdown(phba); | 
 |  | 
 | 	/* Issue an unreg_login to all nodes on all vports */ | 
 | 	vports = lpfc_create_vport_work_array(phba); | 
 | 	if (vports != NULL) { | 
 | 		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | 
 | 			struct Scsi_Host *shost; | 
 |  | 
 | 			if (vports[i]->load_flag & FC_UNLOADING) | 
 | 				continue; | 
 | 			shost =	lpfc_shost_from_vport(vports[i]); | 
 | 			list_for_each_entry_safe(ndlp, next_ndlp, | 
 | 						 &vports[i]->fc_nodes, | 
 | 						 nlp_listp) { | 
 | 				if (!NLP_CHK_NODE_ACT(ndlp)) | 
 | 					continue; | 
 | 				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | 
 | 					continue; | 
 | 				if (ndlp->nlp_type & NLP_FABRIC) { | 
 | 					lpfc_disc_state_machine(vports[i], ndlp, | 
 | 						NULL, NLP_EVT_DEVICE_RECOVERY); | 
 | 					lpfc_disc_state_machine(vports[i], ndlp, | 
 | 						NULL, NLP_EVT_DEVICE_RM); | 
 | 				} | 
 | 				spin_lock_irq(shost->host_lock); | 
 | 				ndlp->nlp_flag &= ~NLP_NPR_ADISC; | 
 | 				spin_unlock_irq(shost->host_lock); | 
 | 				lpfc_unreg_rpi(vports[i], ndlp); | 
 | 			} | 
 | 		} | 
 | 	} | 
 | 	lpfc_destroy_vport_work_array(phba, vports); | 
 |  | 
 | 	lpfc_sli_flush_mbox_queue(phba); | 
 | } | 
 |  | 
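/**
 * lpfc_offline - Take an HBA offline
 * @phba: Pointer to HBA context object.
 *
 * Stops all HBA and vport timers, brings down the SLI layer, and marks
 * every vport with FC_OFFLINE_MODE.
 */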
 | void | 
 | lpfc_offline(struct lpfc_hba *phba) | 
 | { | 
 | 	struct Scsi_Host  *shost; | 
 | 	struct lpfc_vport **vports; | 
 | 	int i; | 
 |  | 
 | 	if (phba->pport->fc_flag & FC_OFFLINE_MODE) | 
 | 		return; | 
 |  | 
 | 	/* stop all timers associated with this hba */ | 
 | 	lpfc_stop_phba_timers(phba); | 
 | 	vports = lpfc_create_vport_work_array(phba); | 
 | 	if (vports != NULL) | 
 | 		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) | 
 | 			lpfc_stop_vport_timers(vports[i]); | 
 | 	lpfc_destroy_vport_work_array(phba, vports); | 
 | 	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | 
 | 			"0460 Bring Adapter offline\n"); | 
 | 	/* Bring down the SLI Layer and cleanup.  The HBA is offline | 
 | 	   now.  */ | 
 | 	lpfc_sli_hba_down(phba); | 
 | 	spin_lock_irq(&phba->hbalock); | 
 | 	phba->work_ha = 0; | 
 | 	spin_unlock_irq(&phba->hbalock); | 
 | 	vports = lpfc_create_vport_work_array(phba); | 
 | 	if (vports != NULL) | 
 | 		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | 
 | 			shost = lpfc_shost_from_vport(vports[i]); | 
 | 			spin_lock_irq(shost->host_lock); | 
 | 			vports[i]->work_port_events = 0; | 
 | 			vports[i]->fc_flag |= FC_OFFLINE_MODE; | 
 | 			spin_unlock_irq(shost->host_lock); | 
 | 		} | 
 | 	lpfc_destroy_vport_work_array(phba, vports); | 
 | } | 
 |  | 
 | /****************************************************************************** | 
 | * Function name: lpfc_scsi_free | 
 | * | 
* Description: Called from lpfc_pci_remove_one to free internal driver resources
 | * | 
 | ******************************************************************************/ | 
 | static int | 
 | lpfc_scsi_free(struct lpfc_hba *phba) | 
 | { | 
 | 	struct lpfc_scsi_buf *sb, *sb_next; | 
 | 	struct lpfc_iocbq *io, *io_next; | 
 |  | 
 | 	spin_lock_irq(&phba->hbalock); | 
 | 	/* Release all the lpfc_scsi_bufs maintained by this host. */ | 
 | 	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { | 
 | 		list_del(&sb->list); | 
 | 		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, | 
 | 			      sb->dma_handle); | 
 | 		kfree(sb); | 
 | 		phba->total_scsi_bufs--; | 
 | 	} | 
 |  | 
 | 	/* Release all the lpfc_iocbq entries maintained by this host. */ | 
 | 	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { | 
 | 		list_del(&io->list); | 
 | 		kfree(io); | 
 | 		phba->total_iocbq_bufs--; | 
 | 	} | 
 |  | 
 | 	spin_unlock_irq(&phba->hbalock); | 
 |  | 
 | 	return 0; | 
 | } | 
 |  | 
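/**
 * lpfc_create_port - Allocate and initialize a Scsi_Host/vport pair
 * @phba: Pointer to HBA context object.
 * @instance: Unique host instance number from lpfc_get_instance().
 * @dev: Device the SCSI host is registered against; &phba->pcidev->dev
 *       selects the physical port, any other device creates an NPIV port.
 *
 * Return: Pointer to the new vport on success, NULL on failure.
 */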
 | struct lpfc_vport * | 
 | lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) | 
 | { | 
 | 	struct lpfc_vport *vport; | 
 | 	struct Scsi_Host  *shost; | 
 | 	int error = 0; | 
 |  | 
 | 	if (dev != &phba->pcidev->dev) | 
 | 		shost = scsi_host_alloc(&lpfc_vport_template, | 
 | 					sizeof(struct lpfc_vport)); | 
 | 	else | 
 | 		shost = scsi_host_alloc(&lpfc_template, | 
 | 					sizeof(struct lpfc_vport)); | 
 | 	if (!shost) | 
 | 		goto out; | 
 |  | 
 | 	vport = (struct lpfc_vport *) shost->hostdata; | 
 | 	vport->phba = phba; | 
 | 	vport->load_flag |= FC_LOADING; | 
 | 	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; | 
 | 	vport->fc_rscn_flush = 0; | 
 |  | 
 | 	lpfc_get_vport_cfgparam(vport); | 
 | 	shost->unique_id = instance; | 
 | 	shost->max_id = LPFC_MAX_TARGET; | 
 | 	shost->max_lun = vport->cfg_max_luns; | 
 | 	shost->this_id = -1; | 
 | 	shost->max_cmd_len = 16; | 
 | 	/* | 
 | 	 * Set initial can_queue value since 0 is no longer supported and | 
 | 	 * scsi_add_host will fail. This will be adjusted later based on the | 
 | 	 * max xri value determined in hba setup. | 
 | 	 */ | 
 | 	shost->can_queue = phba->cfg_hba_queue_depth - 10; | 
 | 	if (dev != &phba->pcidev->dev) { | 
 | 		shost->transportt = lpfc_vport_transport_template; | 
 | 		vport->port_type = LPFC_NPIV_PORT; | 
 | 	} else { | 
 | 		shost->transportt = lpfc_transport_template; | 
 | 		vport->port_type = LPFC_PHYSICAL_PORT; | 
 | 	} | 
 |  | 
 | 	/* Initialize all internally managed lists. */ | 
 | 	INIT_LIST_HEAD(&vport->fc_nodes); | 
 | 	spin_lock_init(&vport->work_port_lock); | 
 |  | 
 | 	init_timer(&vport->fc_disctmo); | 
 | 	vport->fc_disctmo.function = lpfc_disc_timeout; | 
 | 	vport->fc_disctmo.data = (unsigned long)vport; | 
 |  | 
 | 	init_timer(&vport->fc_fdmitmo); | 
 | 	vport->fc_fdmitmo.function = lpfc_fdmi_tmo; | 
 | 	vport->fc_fdmitmo.data = (unsigned long)vport; | 
 |  | 
 | 	init_timer(&vport->els_tmofunc); | 
 | 	vport->els_tmofunc.function = lpfc_els_timeout; | 
 | 	vport->els_tmofunc.data = (unsigned long)vport; | 
 |  | 
 | 	error = scsi_add_host(shost, dev); | 
 | 	if (error) | 
 | 		goto out_put_shost; | 
 |  | 
 | 	spin_lock_irq(&phba->hbalock); | 
 | 	list_add_tail(&vport->listentry, &phba->port_list); | 
 | 	spin_unlock_irq(&phba->hbalock); | 
 | 	return vport; | 
 |  | 
 | out_put_shost: | 
 | 	scsi_host_put(shost); | 
 | out: | 
 | 	return NULL; | 
 | } | 
 |  | 
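/**
 * destroy_port - Remove a Scsi_Host/vport pair created by lpfc_create_port
 * @vport: Pointer to virtual port object to be destroyed.
 */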
 | void | 
 | destroy_port(struct lpfc_vport *vport) | 
 | { | 
 | 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 
 | 	struct lpfc_hba  *phba = vport->phba; | 
 |  | 
 | 	kfree(vport->vname); | 
 |  | 
 | 	lpfc_debugfs_terminate(vport); | 
 | 	fc_remove_host(shost); | 
 | 	scsi_remove_host(shost); | 
 |  | 
 | 	spin_lock_irq(&phba->hbalock); | 
 | 	list_del_init(&vport->listentry); | 
 | 	spin_unlock_irq(&phba->hbalock); | 
 |  | 
 | 	lpfc_cleanup(vport); | 
 | 	return; | 
 | } | 
 |  | 
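/**
 * lpfc_get_instance - Allocate a unique HBA instance number
 *
 * Return: The allocated instance number, or -1 on allocation failure.
 */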
 | int | 
 | lpfc_get_instance(void) | 
 | { | 
 | 	int instance = 0; | 
 |  | 
 | 	/* Assign an unused number */ | 
 | 	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) | 
 | 		return -1; | 
 | 	if (idr_get_new(&lpfc_hba_index, NULL, &instance)) | 
 | 		return -1; | 
 | 	return instance; | 
 | } | 
 |  | 
 | /* | 
 |  * Note: there is no scan_start function as adapter initialization | 
 |  * will have asynchronously kicked off the link initialization. | 
 |  */ | 
 |  | 
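/**
 * lpfc_scan_finished - Decide whether the initial SCSI scan may complete
 * @shost: Pointer to the SCSI host being scanned.
 * @time: Jiffies elapsed since the scan started.
 *
 * Return: 1 when the scan may finish, 0 to keep waiting for discovery.
 */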
 | int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) | 
 | { | 
 | 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 
 | 	struct lpfc_hba   *phba = vport->phba; | 
 | 	int stat = 0; | 
 |  | 
 | 	spin_lock_irq(shost->host_lock); | 
 |  | 
 | 	if (vport->load_flag & FC_UNLOADING) { | 
 | 		stat = 1; | 
 | 		goto finished; | 
 | 	} | 
 | 	if (time >= 30 * HZ) { | 
 | 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 
 | 				"0461 Scanning longer than 30 " | 
 | 				"seconds.  Continuing initialization\n"); | 
 | 		stat = 1; | 
 | 		goto finished; | 
 | 	} | 
 | 	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { | 
 | 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 
 | 				"0465 Link down longer than 15 " | 
 | 				"seconds.  Continuing initialization\n"); | 
 | 		stat = 1; | 
 | 		goto finished; | 
 | 	} | 
 |  | 
 | 	if (vport->port_state != LPFC_VPORT_READY) | 
 | 		goto finished; | 
 | 	if (vport->num_disc_nodes || vport->fc_prli_sent) | 
 | 		goto finished; | 
 | 	if (vport->fc_map_cnt == 0 && time < 2 * HZ) | 
 | 		goto finished; | 
 | 	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) | 
 | 		goto finished; | 
 |  | 
 | 	stat = 1; | 
 |  | 
 | finished: | 
 | 	spin_unlock_irq(shost->host_lock); | 
 | 	return stat; | 
 | } | 
 |  | 
 | void lpfc_host_attrib_init(struct Scsi_Host *shost) | 
 | { | 
 | 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 
 | 	struct lpfc_hba   *phba = vport->phba; | 
 | 	/* | 
	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
 | 	 */ | 
 |  | 
 | 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); | 
 | 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); | 
 | 	fc_host_supported_classes(shost) = FC_COS_CLASS3; | 
 |  | 
 | 	memset(fc_host_supported_fc4s(shost), 0, | 
 | 	       sizeof(fc_host_supported_fc4s(shost))); | 
 | 	fc_host_supported_fc4s(shost)[2] = 1; | 
 | 	fc_host_supported_fc4s(shost)[7] = 1; | 
 |  | 
 | 	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), | 
 | 				 sizeof fc_host_symbolic_name(shost)); | 
 |  | 
 | 	fc_host_supported_speeds(shost) = 0; | 
 | 	if (phba->lmt & LMT_10Gb) | 
 | 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; | 
 | 	if (phba->lmt & LMT_8Gb) | 
 | 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; | 
 | 	if (phba->lmt & LMT_4Gb) | 
 | 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; | 
 | 	if (phba->lmt & LMT_2Gb) | 
 | 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; | 
 | 	if (phba->lmt & LMT_1Gb) | 
 | 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; | 
 |  | 
 | 	fc_host_maxframe_size(shost) = | 
 | 		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | | 
 | 		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; | 
 |  | 
	/* These values are also unchanging */
 | 	memset(fc_host_active_fc4s(shost), 0, | 
 | 	       sizeof(fc_host_active_fc4s(shost))); | 
 | 	fc_host_active_fc4s(shost)[2] = 1; | 
 | 	fc_host_active_fc4s(shost)[7] = 1; | 
 |  | 
 | 	fc_host_max_npiv_vports(shost) = phba->max_vpi; | 
 | 	spin_lock_irq(shost->host_lock); | 
 | 	vport->load_flag &= ~FC_LOADING; | 
 | 	spin_unlock_irq(shost->host_lock); | 
 | } | 
 |  | 
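/**
 * lpfc_enable_msix - Enable MSI-X and request its interrupt vector
 * @phba: Pointer to HBA context object.
 *
 * Return: 0 on success. On failure, MSI-X is disabled again and the error
 * code is returned so the caller can fall back to MSI or INTx.
 */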
 | static int | 
 | lpfc_enable_msix(struct lpfc_hba *phba) | 
 | { | 
 | 	int error; | 
 |  | 
 | 	phba->msix_entries[0].entry = 0; | 
 | 	phba->msix_entries[0].vector = 0; | 
 |  | 
 | 	error = pci_enable_msix(phba->pcidev, phba->msix_entries, | 
 | 				ARRAY_SIZE(phba->msix_entries)); | 
 | 	if (error) { | 
 | 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 
 | 				"0420 Enable MSI-X failed (%d), continuing " | 
 | 				"with MSI\n", error); | 
 | 		pci_disable_msix(phba->pcidev); | 
 | 		return error; | 
 | 	} | 
 |  | 
 | 	error =	request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0, | 
 | 			    LPFC_DRIVER_NAME, phba); | 
 | 	if (error) { | 
 | 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 
 | 				"0421 MSI-X request_irq failed (%d), " | 
 | 				"continuing with MSI\n", error); | 
 | 		pci_disable_msix(phba->pcidev); | 
 | 	} | 
 | 	return error; | 
 | } | 
 |  | 
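/**
 * lpfc_disable_msix - Free the MSI-X vector and disable MSI-X
 * @phba: Pointer to HBA context object.
 */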
 | static void | 
 | lpfc_disable_msix(struct lpfc_hba *phba) | 
 | { | 
 | 	free_irq(phba->msix_entries[0].vector, phba); | 
 | 	pci_disable_msix(phba->pcidev); | 
 | } | 
 |  | 
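/**
 * lpfc_pci_probe_one - PCI probe entry point for lpfc
 * @pdev: Pointer to PCI device being probed.
 * @pid: Matching entry from lpfc_id_table.
 *
 * Maps the adapter registers, allocates SLI and iocb resources, sets up
 * interrupts, creates the physical port, and starts the initial SCSI scan.
 *
 * Return: 0 on success, negative errno on failure.
 */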
 | static int __devinit | 
 | lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | 
 | { | 
 | 	struct lpfc_vport *vport = NULL; | 
 | 	struct lpfc_hba   *phba; | 
 | 	struct lpfc_sli   *psli; | 
 | 	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; | 
 | 	struct Scsi_Host  *shost = NULL; | 
 | 	void *ptr; | 
 | 	unsigned long bar0map_len, bar2map_len; | 
 | 	int error = -ENODEV, retval; | 
 | 	int  i, hbq_count; | 
 | 	uint16_t iotag; | 
 | 	int bars = pci_select_bars(pdev, IORESOURCE_MEM); | 
 |  | 
 | 	if (pci_enable_device_mem(pdev)) | 
 | 		goto out; | 
 | 	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) | 
 | 		goto out_disable_device; | 
 |  | 
 | 	phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); | 
 | 	if (!phba) | 
 | 		goto out_release_regions; | 
 |  | 
 | 	spin_lock_init(&phba->hbalock); | 
 |  | 
 | 	/* Initialize ndlp management spinlock */ | 
 | 	spin_lock_init(&phba->ndlp_lock); | 
 |  | 
 | 	phba->pcidev = pdev; | 
 |  | 
 | 	/* Assign an unused board number */ | 
 | 	if ((phba->brd_no = lpfc_get_instance()) < 0) | 
 | 		goto out_free_phba; | 
 |  | 
 | 	INIT_LIST_HEAD(&phba->port_list); | 
 | 	/* | 
 | 	 * Get all the module params for configuring this host and then | 
 | 	 * establish the host. | 
 | 	 */ | 
 | 	lpfc_get_cfgparam(phba); | 
 | 	phba->max_vpi = LPFC_MAX_VPI; | 
 |  | 
 | 	/* Initialize timers used by driver */ | 
 | 	init_timer(&phba->hb_tmofunc); | 
 | 	phba->hb_tmofunc.function = lpfc_hb_timeout; | 
 | 	phba->hb_tmofunc.data = (unsigned long)phba; | 
 |  | 
 | 	psli = &phba->sli; | 
 | 	init_timer(&psli->mbox_tmo); | 
 | 	psli->mbox_tmo.function = lpfc_mbox_timeout; | 
 | 	psli->mbox_tmo.data = (unsigned long) phba; | 
 | 	init_timer(&phba->fcp_poll_timer); | 
 | 	phba->fcp_poll_timer.function = lpfc_poll_timeout; | 
 | 	phba->fcp_poll_timer.data = (unsigned long) phba; | 
 | 	init_timer(&phba->fabric_block_timer); | 
 | 	phba->fabric_block_timer.function = lpfc_fabric_block_timeout; | 
 | 	phba->fabric_block_timer.data = (unsigned long) phba; | 
 |  | 
 | 	pci_set_master(pdev); | 
 | 	pci_try_set_mwi(pdev); | 
 |  | 
 | 	if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) | 
 | 		if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0) | 
 | 			goto out_idr_remove; | 
 |  | 
 | 	/* | 
 | 	 * Get the bus address of Bar0 and Bar2 and the number of bytes | 
 | 	 * required by each mapping. | 
 | 	 */ | 
 | 	phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0); | 
 | 	bar0map_len        = pci_resource_len(phba->pcidev, 0); | 
 |  | 
 | 	phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); | 
 | 	bar2map_len        = pci_resource_len(phba->pcidev, 2); | 
 |  | 
 | 	/* Map HBA SLIM to a kernel virtual address. */ | 
 | 	phba->slim_memmap_p      = ioremap(phba->pci_bar0_map, bar0map_len); | 
 | 	if (!phba->slim_memmap_p) { | 
 | 		error = -ENODEV; | 
 | 		dev_printk(KERN_ERR, &pdev->dev, | 
 | 			   "ioremap failed for SLIM memory.\n"); | 
 | 		goto out_idr_remove; | 
 | 	} | 
 |  | 
 | 	/* Map HBA Control Registers to a kernel virtual address. */ | 
 | 	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); | 
 | 	if (!phba->ctrl_regs_memmap_p) { | 
 | 		error = -ENODEV; | 
 | 		dev_printk(KERN_ERR, &pdev->dev, | 
 | 			   "ioremap failed for HBA control registers.\n"); | 
 | 		goto out_iounmap_slim; | 
 | 	} | 
 |  | 
 | 	/* Allocate memory for SLI-2 structures */ | 
 | 	phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE, | 
 | 					  &phba->slim2p_mapping, GFP_KERNEL); | 
 | 	if (!phba->slim2p) | 
 | 		goto out_iounmap; | 
 |  | 
 | 	memset(phba->slim2p, 0, SLI2_SLIM_SIZE); | 
 |  | 
 | 	phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, | 
 | 						 lpfc_sli_hbq_size(), | 
 | 						 &phba->hbqslimp.phys, | 
 | 						 GFP_KERNEL); | 
 | 	if (!phba->hbqslimp.virt) | 
 | 		goto out_free_slim; | 
 |  | 
 | 	hbq_count = lpfc_sli_hbq_count(); | 
 | 	ptr = phba->hbqslimp.virt; | 
 | 	for (i = 0; i < hbq_count; ++i) { | 
 | 		phba->hbqs[i].hbq_virt = ptr; | 
 | 		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); | 
 | 		ptr += (lpfc_hbq_defs[i]->entry_count * | 
 | 			sizeof(struct lpfc_hbq_entry)); | 
 | 	} | 
 | 	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; | 
 | 	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer  = lpfc_els_hbq_free; | 
 |  | 
 | 	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); | 
 |  | 
 | 	INIT_LIST_HEAD(&phba->hbqbuf_in_list); | 
 |  | 
 | 	/* Initialize the SLI Layer to run with lpfc HBAs. */ | 
 | 	lpfc_sli_setup(phba); | 
 | 	lpfc_sli_queue_setup(phba); | 
 |  | 
 | 	retval = lpfc_mem_alloc(phba); | 
 | 	if (retval) { | 
 | 		error = retval; | 
 | 		goto out_free_hbqslimp; | 
 | 	} | 
 |  | 
 | 	/* Initialize and populate the iocb list per host.  */ | 
 | 	INIT_LIST_HEAD(&phba->lpfc_iocb_list); | 
 | 	for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) { | 
 | 		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); | 
 | 		if (iocbq_entry == NULL) { | 
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, LPFC_IOCB_LIST_CNT);
 | 			error = -ENOMEM; | 
 | 			goto out_free_iocbq; | 
 | 		} | 
 |  | 
 | 		iotag = lpfc_sli_next_iotag(phba, iocbq_entry); | 
 | 		if (iotag == 0) { | 
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
			       "Unloading driver.\n", __func__);
 | 			error = -ENOMEM; | 
 | 			goto out_free_iocbq; | 
 | 		} | 
 |  | 
 | 		spin_lock_irq(&phba->hbalock); | 
 | 		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); | 
 | 		phba->total_iocbq_bufs++; | 
 | 		spin_unlock_irq(&phba->hbalock); | 
 | 	} | 
 |  | 
 | 	/* Initialize HBA structure */ | 
 | 	phba->fc_edtov = FF_DEF_EDTOV; | 
 | 	phba->fc_ratov = FF_DEF_RATOV; | 
 | 	phba->fc_altov = FF_DEF_ALTOV; | 
 | 	phba->fc_arbtov = FF_DEF_ARBTOV; | 
 |  | 
 | 	INIT_LIST_HEAD(&phba->work_list); | 
 | 	phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); | 
 | 	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); | 
 |  | 
 | 	/* Startup the kernel thread for this host adapter. */ | 
 | 	phba->worker_thread = kthread_run(lpfc_do_work, phba, | 
 | 				       "lpfc_worker_%d", phba->brd_no); | 
 | 	if (IS_ERR(phba->worker_thread)) { | 
 | 		error = PTR_ERR(phba->worker_thread); | 
 | 		goto out_free_iocbq; | 
 | 	} | 
 |  | 
 | 	/* Initialize the list of scsi buffers used by driver for scsi IO. */ | 
 | 	spin_lock_init(&phba->scsi_buf_list_lock); | 
 | 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); | 
 |  | 
 | 	/* Initialize list of fabric iocbs */ | 
 | 	INIT_LIST_HEAD(&phba->fabric_iocb_list); | 
 |  | 
 | 	/* Initialize list to save ELS buffers */ | 
 | 	INIT_LIST_HEAD(&phba->elsbuf); | 
 |  | 
 | 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); | 
 | 	if (!vport) | 
 | 		goto out_kthread_stop; | 
 |  | 
 | 	shost = lpfc_shost_from_vport(vport); | 
 | 	phba->pport = vport; | 
 | 	lpfc_debugfs_initialize(vport); | 
 |  | 
 | 	pci_set_drvdata(pdev, shost); | 
 | 	phba->intr_type = NONE; | 
 |  | 
 | 	if (phba->cfg_use_msi == 2) { | 
 | 		error = lpfc_enable_msix(phba); | 
 | 		if (!error) | 
 | 			phba->intr_type = MSIX; | 
 | 	} | 
 |  | 
	/* Fall back to MSI if MSI-X initialization failed */
 | 	if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { | 
 | 		retval = pci_enable_msi(phba->pcidev); | 
 | 		if (!retval) | 
 | 			phba->intr_type = MSI; | 
 | 		else | 
 | 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 
 | 					"0452 Enable MSI failed, continuing " | 
 | 					"with IRQ\n"); | 
 | 	} | 
 |  | 
	/* MSI-X is the only case that doesn't need to call request_irq */
 | 	if (phba->intr_type != MSIX) { | 
 | 		retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, | 
 | 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba); | 
 | 		if (retval) { | 
 | 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable " | 
 | 					"interrupt handler failed\n"); | 
 | 			error = retval; | 
 | 			goto out_disable_msi; | 
 | 		} else if (phba->intr_type != MSI) | 
 | 			phba->intr_type = INTx; | 
 | 	} | 
 |  | 
 | 	phba->MBslimaddr = phba->slim_memmap_p; | 
 | 	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; | 
 | 	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; | 
 | 	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; | 
 | 	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; | 
 |  | 
 | 	if (lpfc_alloc_sysfs_attr(vport)) { | 
 | 		error = -ENOMEM; | 
 | 		goto out_free_irq; | 
 | 	} | 
 |  | 
 | 	if (lpfc_sli_hba_setup(phba)) { | 
 | 		error = -ENODEV; | 
 | 		goto out_remove_device; | 
 | 	} | 
 |  | 
 | 	/* | 
 | 	 * hba setup may have changed the hba_queue_depth so we need to adjust | 
 | 	 * the value of can_queue. | 
 | 	 */ | 
 | 	shost->can_queue = phba->cfg_hba_queue_depth - 10; | 
 |  | 
 | 	lpfc_host_attrib_init(shost); | 
 |  | 
 | 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 
 | 		spin_lock_irq(shost->host_lock); | 
 | 		lpfc_poll_start_timer(phba); | 
 | 		spin_unlock_irq(shost->host_lock); | 
 | 	} | 
 |  | 
 | 	scsi_scan_host(shost); | 
 |  | 
 | 	return 0; | 
 |  | 
 | out_remove_device: | 
 | 	lpfc_free_sysfs_attr(vport); | 
 | 	spin_lock_irq(shost->host_lock); | 
 | 	vport->load_flag |= FC_UNLOADING; | 
 | 	spin_unlock_irq(shost->host_lock); | 
 | out_free_irq: | 
 | 	lpfc_stop_phba_timers(phba); | 
 | 	phba->pport->work_port_events = 0; | 
 |  | 
 | 	if (phba->intr_type == MSIX) | 
 | 		lpfc_disable_msix(phba); | 
 | 	else | 
 | 		free_irq(phba->pcidev->irq, phba); | 
 |  | 
 | out_disable_msi: | 
 | 	if (phba->intr_type == MSI) | 
 | 		pci_disable_msi(phba->pcidev); | 
 | 	destroy_port(vport); | 
 | out_kthread_stop: | 
 | 	kthread_stop(phba->worker_thread); | 
 | out_free_iocbq: | 
 | 	list_for_each_entry_safe(iocbq_entry, iocbq_next, | 
 | 						&phba->lpfc_iocb_list, list) { | 
 | 		kfree(iocbq_entry); | 
 | 		phba->total_iocbq_bufs--; | 
 | 	} | 
 | 	lpfc_mem_free(phba); | 
 | out_free_hbqslimp: | 
 | 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, | 
 | 			  phba->hbqslimp.phys); | 
 | out_free_slim: | 
 | 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p, | 
 | 							phba->slim2p_mapping); | 
 | out_iounmap: | 
 | 	iounmap(phba->ctrl_regs_memmap_p); | 
 | out_iounmap_slim: | 
 | 	iounmap(phba->slim_memmap_p); | 
 | out_idr_remove: | 
 | 	idr_remove(&lpfc_hba_index, phba->brd_no); | 
 | out_free_phba: | 
 | 	kfree(phba); | 
 | out_release_regions: | 
 | 	pci_release_selected_regions(pdev, bars); | 
 | out_disable_device: | 
 | 	pci_disable_device(pdev); | 
 | out: | 
 | 	pci_set_drvdata(pdev, NULL); | 
 | 	if (shost) | 
 | 		scsi_host_put(shost); | 
 | 	return error; | 
 | } | 
 |  | 
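/**
 * lpfc_pci_remove_one - PCI remove entry point for lpfc
 * @pdev: Pointer to PCI device being removed.
 *
 * Tears down the SCSI host, brings the SLI layer down, and releases the
 * driver, DMA, and PCI resources acquired in lpfc_pci_probe_one().
 */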
 | static void __devexit | 
 | lpfc_pci_remove_one(struct pci_dev *pdev) | 
 | { | 
 | 	struct Scsi_Host  *shost = pci_get_drvdata(pdev); | 
 | 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 
 | 	struct lpfc_hba   *phba = vport->phba; | 
 | 	int bars = pci_select_bars(pdev, IORESOURCE_MEM); | 
 |  | 
 | 	spin_lock_irq(&phba->hbalock); | 
 | 	vport->load_flag |= FC_UNLOADING; | 
 | 	spin_unlock_irq(&phba->hbalock); | 
 |  | 
 | 	kfree(vport->vname); | 
 | 	lpfc_free_sysfs_attr(vport); | 
 |  | 
 | 	kthread_stop(phba->worker_thread); | 
 |  | 
 | 	fc_remove_host(shost); | 
 | 	scsi_remove_host(shost); | 
 | 	lpfc_cleanup(vport); | 
 |  | 
 | 	/* | 
	 * Bring down the SLI Layer. This step disables all interrupts,
 | 	 * clears the rings, discards all mailbox commands, and resets | 
 | 	 * the HBA. | 
 | 	 */ | 
 | 	lpfc_sli_hba_down(phba); | 
 | 	lpfc_sli_brdrestart(phba); | 
 |  | 
 | 	lpfc_stop_phba_timers(phba); | 
 | 	spin_lock_irq(&phba->hbalock); | 
 | 	list_del_init(&vport->listentry); | 
 | 	spin_unlock_irq(&phba->hbalock); | 
 |  | 
 | 	lpfc_debugfs_terminate(vport); | 
 |  | 
 | 	if (phba->intr_type == MSIX) | 
 | 		lpfc_disable_msix(phba); | 
 | 	else { | 
 | 		free_irq(phba->pcidev->irq, phba); | 
 | 		if (phba->intr_type == MSI) | 
 | 			pci_disable_msi(phba->pcidev); | 
 | 	} | 
 |  | 
 | 	pci_set_drvdata(pdev, NULL); | 
 | 	scsi_host_put(shost); | 
 |  | 
 | 	/* | 
 | 	 * Call scsi_free before mem_free since scsi bufs are released to their | 
 | 	 * corresponding pools here. | 
 | 	 */ | 
 | 	lpfc_scsi_free(phba); | 
 | 	lpfc_mem_free(phba); | 
 |  | 
 | 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, | 
 | 			  phba->hbqslimp.phys); | 
 |  | 
 | 	/* Free resources associated with SLI2 interface */ | 
 | 	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, | 
 | 			  phba->slim2p, phba->slim2p_mapping); | 
 |  | 
 | 	/* unmap adapter SLIM and Control Registers */ | 
 | 	iounmap(phba->ctrl_regs_memmap_p); | 
 | 	iounmap(phba->slim_memmap_p); | 
 |  | 
 | 	idr_remove(&lpfc_hba_index, phba->brd_no); | 
 |  | 
 | 	kfree(phba); | 
 |  | 
 | 	pci_release_selected_regions(pdev, bars); | 
 | 	pci_disable_device(pdev); | 
 | } | 
 |  | 
 | /** | 
 |  * lpfc_io_error_detected - called when PCI error is detected | 
 |  * @pdev: Pointer to PCI device | 
 * @state: The current PCI connection state
 |  * | 
 |  * This function is called after a PCI bus error affecting | 
 |  * this device has been detected. | 
 |  */ | 
 | static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, | 
 | 				pci_channel_state_t state) | 
 | { | 
 | 	struct Scsi_Host *shost = pci_get_drvdata(pdev); | 
 | 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 
 | 	struct lpfc_sli *psli = &phba->sli; | 
 | 	struct lpfc_sli_ring  *pring; | 
 |  | 
 | 	if (state == pci_channel_io_perm_failure) | 
 | 		return PCI_ERS_RESULT_DISCONNECT; | 
 |  | 
 | 	pci_disable_device(pdev); | 
 | 	/* | 
 | 	 * There may be I/Os dropped by the firmware. | 
	 * Error out the iocbs on the txcmplq so the SCSI layer
	 * can retry them after the link is re-established.
 | 	 */ | 
 | 	pring = &psli->ring[psli->fcp_ring]; | 
 | 	lpfc_sli_abort_iocb_ring(phba, pring); | 
 |  | 
 | 	if (phba->intr_type == MSIX) | 
 | 		lpfc_disable_msix(phba); | 
 | 	else { | 
 | 		free_irq(phba->pcidev->irq, phba); | 
 | 		if (phba->intr_type == MSI) | 
 | 			pci_disable_msi(phba->pcidev); | 
 | 	} | 
 |  | 
 | 	/* Request a slot reset. */ | 
 | 	return PCI_ERS_RESULT_NEED_RESET; | 
 | } | 
 |  | 
 | /** | 
 |  * lpfc_io_slot_reset - called after the pci bus has been reset. | 
 |  * @pdev: Pointer to PCI device | 
 |  * | 
 |  * Restart the card from scratch, as if from a cold-boot. | 
 |  */ | 
 | static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) | 
 | { | 
 | 	struct Scsi_Host *shost = pci_get_drvdata(pdev); | 
 | 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 
 | 	struct lpfc_sli *psli = &phba->sli; | 
 | 	int error, retval; | 
 |  | 
 | 	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); | 
 | 	if (pci_enable_device_mem(pdev)) { | 
 | 		printk(KERN_ERR "lpfc: Cannot re-enable " | 
 | 			"PCI device after reset.\n"); | 
 | 		return PCI_ERS_RESULT_DISCONNECT; | 
 | 	} | 
 |  | 
 | 	pci_set_master(pdev); | 
 |  | 
 | 	spin_lock_irq(&phba->hbalock); | 
 | 	psli->sli_flag &= ~LPFC_SLI2_ACTIVE; | 
 | 	spin_unlock_irq(&phba->hbalock); | 
 |  | 
 | 	/* Enable configured interrupt method */ | 
 | 	phba->intr_type = NONE; | 
 | 	if (phba->cfg_use_msi == 2) { | 
 | 		error = lpfc_enable_msix(phba); | 
 | 		if (!error) | 
 | 			phba->intr_type = MSIX; | 
 | 	} | 
 |  | 
	/* Fall back to MSI if MSI-X initialization failed */
 | 	if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { | 
 | 		retval = pci_enable_msi(phba->pcidev); | 
 | 		if (!retval) | 
 | 			phba->intr_type = MSI; | 
 | 		else | 
 | 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 
 | 					"0470 Enable MSI failed, continuing " | 
 | 					"with IRQ\n"); | 
 | 	} | 
 |  | 
	/* MSI-X is the only case that doesn't need to call request_irq */
 | 	if (phba->intr_type != MSIX) { | 
 | 		retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, | 
 | 				     IRQF_SHARED, LPFC_DRIVER_NAME, phba); | 
 | 		if (retval) { | 
 | 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 
 | 					"0471 Enable interrupt handler " | 
 | 					"failed\n"); | 
 | 		} else if (phba->intr_type != MSI) | 
 | 			phba->intr_type = INTx; | 
 | 	} | 
 |  | 
 | 	/* Take device offline; this will perform cleanup */ | 
 | 	lpfc_offline(phba); | 
 | 	lpfc_sli_brdrestart(phba); | 
 |  | 
 | 	return PCI_ERS_RESULT_RECOVERED; | 
 | } | 
 |  | 
 | /** | 
 |  * lpfc_io_resume - called when traffic can start flowing again. | 
 |  * @pdev: Pointer to PCI device | 
 |  * | 
 |  * This callback is called when the error recovery driver tells us that | 
 * it's OK to resume normal operation.
 |  */ | 
 | static void lpfc_io_resume(struct pci_dev *pdev) | 
 | { | 
 | 	struct Scsi_Host *shost = pci_get_drvdata(pdev); | 
 | 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | 
 |  | 
 | 	lpfc_online(phba); | 
 | } | 
 |  | 
 | static struct pci_device_id lpfc_id_table[] = { | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, | 
 | 		PCI_ANY_ID, PCI_ANY_ID, }, | 
 | 	{ 0 } | 
 | }; | 
 |  | 
 | MODULE_DEVICE_TABLE(pci, lpfc_id_table); | 
 |  | 
 | static struct pci_error_handlers lpfc_err_handler = { | 
 | 	.error_detected = lpfc_io_error_detected, | 
 | 	.slot_reset = lpfc_io_slot_reset, | 
 | 	.resume = lpfc_io_resume, | 
 | }; | 
 |  | 
 | static struct pci_driver lpfc_driver = { | 
 | 	.name		= LPFC_DRIVER_NAME, | 
 | 	.id_table	= lpfc_id_table, | 
 | 	.probe		= lpfc_pci_probe_one, | 
 | 	.remove		= __devexit_p(lpfc_pci_remove_one), | 
 | 	.err_handler    = &lpfc_err_handler, | 
 | }; | 
 |  | 
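/**
 * lpfc_init - lpfc module initialization
 *
 * Attaches the FC transport templates (including the vport template when
 * NPIV is enabled) and registers the PCI driver.
 *
 * Return: 0 on success, -ENOMEM if a transport template cannot be
 * attached, or the error from pci_register_driver().
 */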
 | static int __init | 
 | lpfc_init(void) | 
 | { | 
 | 	int error = 0; | 
 |  | 
 | 	printk(LPFC_MODULE_DESC "\n"); | 
 | 	printk(LPFC_COPYRIGHT "\n"); | 
 |  | 
 | 	if (lpfc_enable_npiv) { | 
 | 		lpfc_transport_functions.vport_create = lpfc_vport_create; | 
 | 		lpfc_transport_functions.vport_delete = lpfc_vport_delete; | 
 | 	} | 
 | 	lpfc_transport_template = | 
 | 				fc_attach_transport(&lpfc_transport_functions); | 
 | 	if (lpfc_transport_template == NULL) | 
 | 		return -ENOMEM; | 
 | 	if (lpfc_enable_npiv) { | 
 | 		lpfc_vport_transport_template = | 
 | 			fc_attach_transport(&lpfc_vport_transport_functions); | 
 | 		if (lpfc_vport_transport_template == NULL) { | 
 | 			fc_release_transport(lpfc_transport_template); | 
 | 			return -ENOMEM; | 
 | 		} | 
 | 	} | 
 | 	error = pci_register_driver(&lpfc_driver); | 
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}
 |  | 
 | 	return error; | 
 | } | 
 |  | 
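/**
 * lpfc_exit - lpfc module removal
 *
 * Unregisters the PCI driver and releases the FC transport templates.
 */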
 | static void __exit | 
 | lpfc_exit(void) | 
 | { | 
 | 	pci_unregister_driver(&lpfc_driver); | 
 | 	fc_release_transport(lpfc_transport_template); | 
 | 	if (lpfc_enable_npiv) | 
 | 		fc_release_transport(lpfc_vport_transport_template); | 
 | } | 
 |  | 
 | module_init(lpfc_init); | 
 | module_exit(lpfc_exit); | 
 | MODULE_LICENSE("GPL"); | 
 | MODULE_DESCRIPTION(LPFC_MODULE_DESC); | 
 | MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); | 
 | MODULE_VERSION("0:" LPFC_DRIVER_VERSION); |