/*
 * Broadcom specific AMBA
 * PCI Core
 *
 * Copyright 2005, 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>

/**************************************************
 * R/W ops.
 **************************************************/

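/* Read a PCIe core register through the indirect ADDR/DATA register pair.
 * The address register is read back so the address write has reached the
 * core before the data register is accessed. */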
u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
}

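/* Matching indirect write helper; currently unused and compiled out. */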
#if 0
static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}
#endif

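/* Select the SerDes PHY that subsequent MDIO accesses will address
 * (used on core rev >= 10) and wait for the transaction to complete. */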
static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
{
	u32 v;
	int i;

	v = BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
	v |= (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
	v |= (BCMA_CORE_PCI_MDIODATA_BLK_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	v |= (phy << 4);
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

	udelay(10);
	for (i = 0; i < 200; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		msleep(1);
	}
}

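/* Read a 16-bit SerDes register over the MDIO interface. Returns 0 if the
 * transaction does not complete within the retry limit. */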
static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
{
	int max_retries = 10;
	u16 ret = 0;
	u32 v;
	int i;

	/* enable mdio access to SERDES */
	v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
	v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

	if (pc->core->id.rev >= 10) {
		max_retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
		v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
		     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	} else {
		v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
	}

	/* OR in the command bits, keeping the device/register address
	 * set up above */
	v |= BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_READ;
	v |= BCMA_CORE_PCI_MDIODATA_TA;

	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < max_retries; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) {
			udelay(10);
			ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
			break;
		}
		msleep(1);
	}
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
	return ret;
}

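/* Write a 16-bit value to a SerDes register over the MDIO interface. */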
static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
				u8 address, u16 data)
{
	int max_retries = 10;
	u32 v;
	int i;

	/* enable mdio access to SERDES */
	v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
	v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

	if (pc->core->id.rev >= 10) {
		max_retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
		v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
		     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	} else {
		v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
	}

	/* OR in the command bits, keeping the device/register address
	 * set up above */
	v |= BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	v |= data;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < max_retries; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		msleep(1);
	}
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
}

/**************************************************
 * Workarounds.
 **************************************************/

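/* Return the SerDes RX control value to program; the polarity-invert bit is
 * added when the PLP status register reports an inverted RX polarity. */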
static u8 bcma_pcicore_polarity_workaround(struct bcma_drv_pci *pc)
{
	u32 tmp;

	tmp = bcma_pcie_read(pc, BCMA_CORE_PCI_PLP_STATUSREG);
	if (tmp & BCMA_CORE_PCI_PLP_POLARITYINV_STAT)
		return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE |
		       BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY;
	else
		return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE;
}

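/* Fix up the SerDes RX polarity and disable the PLL frequency-detect
 * feature if it is currently enabled. */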
static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
{
	u16 tmp;

	bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_RX,
			     BCMA_CORE_PCI_SERDES_RX_CTRL,
			     bcma_pcicore_polarity_workaround(pc));
	tmp = bcma_pcie_mdio_read(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
				  BCMA_CORE_PCI_SERDES_PLL_CTRL);
	if (tmp & BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN)
		bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
				     BCMA_CORE_PCI_SERDES_PLL_CTRL,
				     tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
}

/**************************************************
 * Init.
 **************************************************/

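/* Clientmode (endpoint) initialization: currently this only applies the
 * SerDes workarounds. */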
static void __devinit bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
	bcma_pcicore_serdes_workaround(pc);
}

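/* One-time setup of the PCI(e) core: detect hostmode (only possible when the
 * hostmode driver is built in) and run the matching initialization. */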
void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
{
	if (pc->setup_done)
		return;

#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
	pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
	if (pc->hostmode)
		bcma_core_pci_hostmode_init(pc);
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */

	if (!pc->hostmode)
		bcma_core_pci_clientmode_init(pc);
}

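/* Enable or disable routing of the given core's interrupt through the PCI
 * core to the host by updating the core's bit in the BCMA_PCI_IRQMASK
 * config word. A no-op (returning 0) when the bus is not PCI-hosted. */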
int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
			  bool enable)
{
	struct pci_dev *pdev = pc->core->bus->host_pci;
	u32 coremask, tmp;
	int err = 0;

	if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
		/* This bcma device is not on a PCI host bus, so its IRQs are
		 * not routed through the PCI core and routing must not be
		 * enabled here. */
		goto out;
	}

	err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
	if (err)
		goto out;

	coremask = BIT(core->core_index) << 8;
	if (enable)
		tmp |= coremask;
	else
		tmp &= ~coremask;

	err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);

out:
	return err;
}
EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);