| Lennert Buytenhek | 1d22e05 | 2006-09-22 02:28:13 +0200 | [diff] [blame] | 1 | /* | 
|  | 2 | * EP93xx ethernet network device driver | 
|  | 3 | * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> | 
|  | 4 | * Dedicated to Marija Kulikova. | 
|  | 5 | * | 
|  | 6 | * This program is free software; you can redistribute it and/or modify | 
|  | 7 | * it under the terms of the GNU General Public License as published by | 
|  | 8 | * the Free Software Foundation; either version 2 of the License, or | 
|  | 9 | * (at your option) any later version. | 
|  | 10 | */ | 
|  | 11 |  | 
| Lennert Buytenhek | 1d22e05 | 2006-09-22 02:28:13 +0200 | [diff] [blame] | 12 | #include <linux/dma-mapping.h> | 
|  | 13 | #include <linux/module.h> | 
|  | 14 | #include <linux/kernel.h> | 
|  | 15 | #include <linux/netdevice.h> | 
|  | 16 | #include <linux/mii.h> | 
|  | 17 | #include <linux/etherdevice.h> | 
|  | 18 | #include <linux/ethtool.h> | 
|  | 19 | #include <linux/init.h> | 
|  | 20 | #include <linux/moduleparam.h> | 
|  | 21 | #include <linux/platform_device.h> | 
|  | 22 | #include <linux/delay.h> | 
|  | 23 | #include <asm/arch/ep93xx-regs.h> | 
|  | 24 | #include <asm/arch/platform.h> | 
|  | 25 | #include <asm/io.h> | 
|  | 26 |  | 
|  | 27 | #define DRV_MODULE_NAME		"ep93xx-eth" | 
|  | 28 | #define DRV_MODULE_VERSION	"0.1" | 
|  | 29 |  | 
|  | 30 | #define RX_QUEUE_ENTRIES	64 | 
|  | 31 | #define TX_QUEUE_ENTRIES	8 | 
|  | 32 |  | 
|  | 33 | #define MAX_PKT_SIZE		2044 | 
|  | 34 | #define PKT_BUF_SIZE		2048 | 
|  | 35 |  | 
|  | 36 | #define REG_RXCTL		0x0000 | 
|  | 37 | #define  REG_RXCTL_DEFAULT	0x00073800 | 
|  | 38 | #define REG_TXCTL		0x0004 | 
|  | 39 | #define  REG_TXCTL_ENABLE	0x00000001 | 
|  | 40 | #define REG_MIICMD		0x0010 | 
|  | 41 | #define  REG_MIICMD_READ	0x00008000 | 
|  | 42 | #define  REG_MIICMD_WRITE	0x00004000 | 
|  | 43 | #define REG_MIIDATA		0x0014 | 
|  | 44 | #define REG_MIISTS		0x0018 | 
|  | 45 | #define  REG_MIISTS_BUSY	0x00000001 | 
|  | 46 | #define REG_SELFCTL		0x0020 | 
|  | 47 | #define  REG_SELFCTL_RESET	0x00000001 | 
|  | 48 | #define REG_INTEN		0x0024 | 
|  | 49 | #define  REG_INTEN_TX		0x00000008 | 
|  | 50 | #define  REG_INTEN_RX		0x00000007 | 
|  | 51 | #define REG_INTSTSP		0x0028 | 
|  | 52 | #define  REG_INTSTS_TX		0x00000008 | 
|  | 53 | #define  REG_INTSTS_RX		0x00000004 | 
|  | 54 | #define REG_INTSTSC		0x002c | 
|  | 55 | #define REG_AFP			0x004c | 
|  | 56 | #define REG_INDAD0		0x0050 | 
|  | 57 | #define REG_INDAD1		0x0051 | 
|  | 58 | #define REG_INDAD2		0x0052 | 
|  | 59 | #define REG_INDAD3		0x0053 | 
|  | 60 | #define REG_INDAD4		0x0054 | 
|  | 61 | #define REG_INDAD5		0x0055 | 
|  | 62 | #define REG_GIINTMSK		0x0064 | 
|  | 63 | #define  REG_GIINTMSK_ENABLE	0x00008000 | 
|  | 64 | #define REG_BMCTL		0x0080 | 
|  | 65 | #define  REG_BMCTL_ENABLE_TX	0x00000100 | 
|  | 66 | #define  REG_BMCTL_ENABLE_RX	0x00000001 | 
|  | 67 | #define REG_BMSTS		0x0084 | 
|  | 68 | #define  REG_BMSTS_RX_ACTIVE	0x00000008 | 
|  | 69 | #define REG_RXDQBADD		0x0090 | 
|  | 70 | #define REG_RXDQBLEN		0x0094 | 
|  | 71 | #define REG_RXDCURADD		0x0098 | 
|  | 72 | #define REG_RXDENQ		0x009c | 
|  | 73 | #define REG_RXSTSQBADD		0x00a0 | 
|  | 74 | #define REG_RXSTSQBLEN		0x00a4 | 
|  | 75 | #define REG_RXSTSQCURADD	0x00a8 | 
|  | 76 | #define REG_RXSTSENQ		0x00ac | 
|  | 77 | #define REG_TXDQBADD		0x00b0 | 
|  | 78 | #define REG_TXDQBLEN		0x00b4 | 
|  | 79 | #define REG_TXDQCURADD		0x00b8 | 
|  | 80 | #define REG_TXDENQ		0x00bc | 
|  | 81 | #define REG_TXSTSQBADD		0x00c0 | 
|  | 82 | #define REG_TXSTSQBLEN		0x00c4 | 
|  | 83 | #define REG_TXSTSQCURADD	0x00c8 | 
|  | 84 | #define REG_MAXFRMLEN		0x00e8 | 
|  | 85 |  | 
|  | 86 | struct ep93xx_rdesc | 
|  | 87 | { | 
|  | 88 | u32	buf_addr; | 
|  | 89 | u32	rdesc1; | 
|  | 90 | }; | 
|  | 91 |  | 
|  | 92 | #define RDESC1_NSOF		0x80000000 | 
|  | 93 | #define RDESC1_BUFFER_INDEX	0x7fff0000 | 
|  | 94 | #define RDESC1_BUFFER_LENGTH	0x0000ffff | 
|  | 95 |  | 
|  | 96 | struct ep93xx_rstat | 
|  | 97 | { | 
|  | 98 | u32	rstat0; | 
|  | 99 | u32	rstat1; | 
|  | 100 | }; | 
|  | 101 |  | 
|  | 102 | #define RSTAT0_RFP		0x80000000 | 
|  | 103 | #define RSTAT0_RWE		0x40000000 | 
|  | 104 | #define RSTAT0_EOF		0x20000000 | 
|  | 105 | #define RSTAT0_EOB		0x10000000 | 
|  | 106 | #define RSTAT0_AM		0x00c00000 | 
|  | 107 | #define RSTAT0_RX_ERR		0x00200000 | 
|  | 108 | #define RSTAT0_OE		0x00100000 | 
|  | 109 | #define RSTAT0_FE		0x00080000 | 
|  | 110 | #define RSTAT0_RUNT		0x00040000 | 
|  | 111 | #define RSTAT0_EDATA		0x00020000 | 
|  | 112 | #define RSTAT0_CRCE		0x00010000 | 
|  | 113 | #define RSTAT0_CRCI		0x00008000 | 
|  | 114 | #define RSTAT0_HTI		0x00003f00 | 
|  | 115 | #define RSTAT1_RFP		0x80000000 | 
|  | 116 | #define RSTAT1_BUFFER_INDEX	0x7fff0000 | 
|  | 117 | #define RSTAT1_FRAME_LENGTH	0x0000ffff | 
|  | 118 |  | 
|  | 119 | struct ep93xx_tdesc | 
|  | 120 | { | 
|  | 121 | u32	buf_addr; | 
|  | 122 | u32	tdesc1; | 
|  | 123 | }; | 
|  | 124 |  | 
|  | 125 | #define TDESC1_EOF		0x80000000 | 
|  | 126 | #define TDESC1_BUFFER_INDEX	0x7fff0000 | 
|  | 127 | #define TDESC1_BUFFER_ABORT	0x00008000 | 
|  | 128 | #define TDESC1_BUFFER_LENGTH	0x00000fff | 
|  | 129 |  | 
|  | 130 | struct ep93xx_tstat | 
|  | 131 | { | 
|  | 132 | u32	tstat0; | 
|  | 133 | }; | 
|  | 134 |  | 
|  | 135 | #define TSTAT0_TXFP		0x80000000 | 
|  | 136 | #define TSTAT0_TXWE		0x40000000 | 
|  | 137 | #define TSTAT0_FA		0x20000000 | 
|  | 138 | #define TSTAT0_LCRS		0x10000000 | 
|  | 139 | #define TSTAT0_OW		0x04000000 | 
|  | 140 | #define TSTAT0_TXU		0x02000000 | 
|  | 141 | #define TSTAT0_ECOLL		0x01000000 | 
|  | 142 | #define TSTAT0_NCOLL		0x001f0000 | 
|  | 143 | #define TSTAT0_BUFFER_INDEX	0x00007fff | 
|  | 144 |  | 
|  | 145 | struct ep93xx_descs | 
|  | 146 | { | 
|  | 147 | struct ep93xx_rdesc	rdesc[RX_QUEUE_ENTRIES]; | 
|  | 148 | struct ep93xx_tdesc	tdesc[TX_QUEUE_ENTRIES]; | 
|  | 149 | struct ep93xx_rstat	rstat[RX_QUEUE_ENTRIES]; | 
|  | 150 | struct ep93xx_tstat	tstat[TX_QUEUE_ENTRIES]; | 
|  | 151 | }; | 
|  | 152 |  | 
|  | 153 | struct ep93xx_priv | 
|  | 154 | { | 
|  | 155 | struct resource		*res; | 
|  | 156 | void			*base_addr; | 
|  | 157 | int			irq; | 
|  | 158 |  | 
|  | 159 | struct ep93xx_descs	*descs; | 
|  | 160 | dma_addr_t		descs_dma_addr; | 
|  | 161 |  | 
|  | 162 | void			*rx_buf[RX_QUEUE_ENTRIES]; | 
|  | 163 | void			*tx_buf[TX_QUEUE_ENTRIES]; | 
|  | 164 |  | 
|  | 165 | spinlock_t		rx_lock; | 
|  | 166 | unsigned int		rx_pointer; | 
|  | 167 | unsigned int		tx_clean_pointer; | 
|  | 168 | unsigned int		tx_pointer; | 
|  | 169 | spinlock_t		tx_pending_lock; | 
|  | 170 | unsigned int		tx_pending; | 
|  | 171 |  | 
|  | 172 | struct net_device_stats	stats; | 
|  | 173 |  | 
|  | 174 | struct mii_if_info	mii; | 
|  | 175 | u8			mdc_divisor; | 
|  | 176 | }; | 
|  | 177 |  | 
|  | 178 | #define rdb(ep, off)		__raw_readb((ep)->base_addr + (off)) | 
|  | 179 | #define rdw(ep, off)		__raw_readw((ep)->base_addr + (off)) | 
|  | 180 | #define rdl(ep, off)		__raw_readl((ep)->base_addr + (off)) | 
|  | 181 | #define wrb(ep, off, val)	__raw_writeb((val), (ep)->base_addr + (off)) | 
|  | 182 | #define wrw(ep, off, val)	__raw_writew((val), (ep)->base_addr + (off)) | 
|  | 183 | #define wrl(ep, off, val)	__raw_writel((val), (ep)->base_addr + (off)) | 
|  | 184 |  | 
|  | 185 | static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg); | 
|  | 186 |  | 
|  | 187 | static struct net_device_stats *ep93xx_get_stats(struct net_device *dev) | 
|  | 188 | { | 
|  | 189 | struct ep93xx_priv *ep = netdev_priv(dev); | 
|  | 190 | return &(ep->stats); | 
|  | 191 | } | 
|  | 192 |  | 
/*
 * Old-style NAPI receive: consume up to *budget completed rx status
 * entries, copy each frame into a fresh skb and push it up the stack.
 * Returns nonzero while more work may remain (ring not drained).
 */
static int ep93xx_rx(struct net_device *dev, int *budget)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int rx_done;
	int processed;

	rx_done = 0;
	processed = 0;
	while (*budget > 0) {
		int entry;
		struct ep93xx_rstat *rstat;
		u32 rstat0;
		u32 rstat1;
		int length;
		struct sk_buff *skb;

		entry = ep->rx_pointer;
		rstat = ep->descs->rstat + entry;

		/* The entry is only valid once the MAC has set the
		 * frame-processed bit in BOTH status words.  */
		rstat0 = rstat->rstat0;
		rstat1 = rstat->rstat1;
		if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP)) {
			rx_done = 1;
			break;
		}

		/* Clear the entry so it can be recognised as fresh the
		 * next time the hardware fills it.  */
		rstat->rstat0 = 0;
		rstat->rstat1 = 0;

		/* Buffers are sized so a frame always fits one buffer;
		 * anything else indicates a hardware/driver bug.  */
		if (!(rstat0 & RSTAT0_EOF))
			printk(KERN_CRIT "ep93xx_rx: not end-of-frame "
					 " %.8x %.8x\n", rstat0, rstat1);
		if (!(rstat0 & RSTAT0_EOB))
			printk(KERN_CRIT "ep93xx_rx: not end-of-buffer "
					 " %.8x %.8x\n", rstat0, rstat1);
		if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
			printk(KERN_CRIT "ep93xx_rx: entry mismatch "
					 " %.8x %.8x\n", rstat0, rstat1);

		if (!(rstat0 & RSTAT0_RWE)) {
			/* Received with error: bump the matching counters
			 * and recycle the buffer without passing it up.  */
			ep->stats.rx_errors++;
			if (rstat0 & RSTAT0_OE)
				ep->stats.rx_fifo_errors++;
			if (rstat0 & RSTAT0_FE)
				ep->stats.rx_frame_errors++;
			if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA))
				ep->stats.rx_length_errors++;
			if (rstat0 & RSTAT0_CRCE)
				ep->stats.rx_crc_errors++;
			goto err;
		}

		length = rstat1 & RSTAT1_FRAME_LENGTH;
		if (length > MAX_PKT_SIZE) {
			printk(KERN_NOTICE "ep93xx_rx: invalid length "
					 " %.8x %.8x\n", rstat0, rstat1);
			goto err;
		}

		/* Strip FCS.  */
		if (rstat0 & RSTAT0_CRCI)
			length -= 4;

		/* +2 / skb_reserve(2) keeps the IP header word-aligned.  */
		skb = dev_alloc_skb(length + 2);
		if (likely(skb != NULL)) {
			skb_reserve(skb, 2);
			/* Make the DMA'd data visible to the CPU before
			 * copying it out of the ring buffer.  */
			dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
						length, DMA_FROM_DEVICE);
			skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, dev);

			dev->last_rx = jiffies;

			netif_receive_skb(skb);

			ep->stats.rx_packets++;
			ep->stats.rx_bytes += length;
		} else {
			ep->stats.rx_dropped++;
		}

err:
		ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1);
		processed++;
		dev->quota--;
		(*budget)--;
	}

	/* Return the consumed entries to the hardware in one go.  */
	if (processed) {
		wrw(ep, REG_RXDENQ, processed);
		wrw(ep, REG_RXSTSENQ, processed);
	}

	return !rx_done;
}
|  | 289 |  | 
|  | 290 | static int ep93xx_have_more_rx(struct ep93xx_priv *ep) | 
|  | 291 | { | 
| Lennert Buytenhek | 2d38cab | 2006-10-30 19:52:31 +0100 | [diff] [blame] | 292 | struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer; | 
|  | 293 | return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP)); | 
| Lennert Buytenhek | 1d22e05 | 2006-09-22 02:28:13 +0200 | [diff] [blame] | 294 | } | 
|  | 295 |  | 
/*
 * Old-style NAPI poll callback.  Returns 1 while quota was exhausted
 * (stay on the poll list), 0 once the ring is drained and rx
 * interrupts have been re-enabled.
 */
static int ep93xx_poll(struct net_device *dev, int *budget)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	/*
	 * @@@ Have to stop polling if device is downed while we
	 * are polling.
	 */

poll_some_more:
	if (ep93xx_rx(dev, budget))
		return 1;

	netif_rx_complete(dev);

	/*
	 * Close the race between re-enabling the rx interrupt and a
	 * frame arriving just before: re-check the ring under rx_lock
	 * and, if anything slipped in, mask rx again, ack the latched
	 * status and try to reschedule ourselves.
	 */
	spin_lock_irq(&ep->rx_lock);
	wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
	if (ep93xx_have_more_rx(ep)) {
		wrl(ep, REG_INTEN, REG_INTEN_TX);
		wrl(ep, REG_INTSTSP, REG_INTSTS_RX);
		spin_unlock_irq(&ep->rx_lock);

		if (netif_rx_reschedule(dev, 0))
			goto poll_some_more;

		return 0;
	}
	spin_unlock_irq(&ep->rx_lock);

	return 0;
}
|  | 327 |  | 
/*
 * hard_start_xmit hook: copy the frame into the per-entry tx bounce
 * buffer, fill in the descriptor and kick the transmit queue.  The skb
 * is always consumed (NETDEV_TX_OK even on oversize drop).
 */
static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int entry;

	/* The hardware cannot transmit frames above MAX_PKT_SIZE.  */
	if (unlikely(skb->len > MAX_PKT_SIZE)) {
		ep->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	entry = ep->tx_pointer;
	ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);

	/* Descriptor and data must be in place before REG_TXDENQ below
	 * hands the entry to the MAC.  */
	ep->descs->tdesc[entry].tdesc1 =
		TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
	dma_sync_single(NULL, ep->descs->tdesc[entry].buf_addr,
				skb->len, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	dev->trans_start = jiffies;

	/* Stop the queue when this submission fills the ring;
	 * ep93xx_tx_complete() restarts it.  */
	spin_lock_irq(&ep->tx_pending_lock);
	ep->tx_pending++;
	if (ep->tx_pending == TX_QUEUE_ENTRIES)
		netif_stop_queue(dev);
	spin_unlock_irq(&ep->tx_pending_lock);

	/* Enqueue one descriptor to the hardware.  */
	wrl(ep, REG_TXDENQ, 1);

	return NETDEV_TX_OK;
}
|  | 361 |  | 
/*
 * Reap completed tx status entries, update counters and restart the
 * queue if it was stopped because the ring was full.  Runs in hard-irq
 * context (called from ep93xx_irq()), hence plain spin_lock() while
 * ep93xx_xmit() uses spin_lock_irq() on the same lock.
 */
static void ep93xx_tx_complete(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int wake;

	wake = 0;

	spin_lock(&ep->tx_pending_lock);
	while (1) {
		int entry;
		struct ep93xx_tstat *tstat;
		u32 tstat0;

		entry = ep->tx_clean_pointer;
		tstat = ep->descs->tstat + entry;

		/* Stop at the first entry the MAC hasn't completed.  */
		tstat0 = tstat->tstat0;
		if (!(tstat0 & TSTAT0_TXFP))
			break;

		/* Clear so the entry reads as fresh next time around.  */
		tstat->tstat0 = 0;

		if (tstat0 & TSTAT0_FA)
			printk(KERN_CRIT "ep93xx_tx_complete: frame aborted "
					 " %.8x\n", tstat0);
		if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
			printk(KERN_CRIT "ep93xx_tx_complete: entry mismatch "
					 " %.8x\n", tstat0);

		if (tstat0 & TSTAT0_TXWE) {
			/* Transmitted without error; recover the frame
			 * length from the descriptor we wrote in xmit.  */
			int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;

			ep->stats.tx_packets++;
			ep->stats.tx_bytes += length;
		} else {
			ep->stats.tx_errors++;
		}

		if (tstat0 & TSTAT0_OW)
			ep->stats.tx_window_errors++;
		if (tstat0 & TSTAT0_TXU)
			ep->stats.tx_fifo_errors++;
		/* TSTAT0_NCOLL: collision count in bits 16..20.  */
		ep->stats.collisions += (tstat0 >> 16) & 0x1f;

		ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
		/* Queue was stopped only when the ring filled up; wake
		 * it once we free the first slot.  */
		if (ep->tx_pending == TX_QUEUE_ENTRIES)
			wake = 1;
		ep->tx_pending--;
	}
	spin_unlock(&ep->tx_pending_lock);

	if (wake)
		netif_wake_queue(dev);
}
|  | 416 |  | 
/*
 * Interrupt handler (shared line).  Reads-and-clears the interrupt
 * status; rx work is deferred to NAPI (mask rx, schedule poll), tx
 * completions are reaped inline.
 */
static irqreturn_t ep93xx_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ep93xx_priv *ep = netdev_priv(dev);
	u32 status;

	/* REG_INTSTSC: reading this register also clears the status.  */
	status = rdl(ep, REG_INTSTSC);
	if (status == 0)
		return IRQ_NONE;	/* not ours (shared IRQ) */

	if (status & REG_INTSTS_RX) {
		spin_lock(&ep->rx_lock);
		if (likely(__netif_rx_schedule_prep(dev))) {
			/* Mask rx interrupts until ep93xx_poll() has
			 * drained the ring and re-enables them.  */
			wrl(ep, REG_INTEN, REG_INTEN_TX);
			__netif_rx_schedule(dev);
		}
		spin_unlock(&ep->rx_lock);
	}

	if (status & REG_INTSTS_TX)
		ep93xx_tx_complete(dev);

	return IRQ_HANDLED;
}
|  | 441 |  | 
/*
 * Free the packet buffers and the coherent descriptor block.  Buffers
 * were allocated one page per TWO ring entries (see
 * ep93xx_alloc_buffers()), so both loops step by 2 and only even
 * entries own a page/mapping.
 */
static void ep93xx_free_buffers(struct ep93xx_priv *ep)
{
	int i;

	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
		dma_addr_t d;

		d = ep->descs->rdesc[i].buf_addr;
		if (d)
			dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE);

		if (ep->rx_buf[i] != NULL)
			free_page((unsigned long)ep->rx_buf[i]);
	}

	for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
		dma_addr_t d;

		d = ep->descs->tdesc[i].buf_addr;
		if (d)
			dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE);

		if (ep->tx_buf[i] != NULL)
			free_page((unsigned long)ep->tx_buf[i]);
	}

	dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs,
							ep->descs_dma_addr);
}
|  | 471 |  | 
/*
 * The hardware enforces a sub-2K maximum packet size, so we put
 * two buffers on every hardware page.
 */
static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
{
	int i;

	/* One coherent allocation holds all four descriptor/status
	 * rings (struct ep93xx_descs).  Returns 1 on any failure.  */
	ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs),
				&ep->descs_dma_addr, GFP_KERNEL | GFP_DMA);
	if (ep->descs == NULL)
		return 1;

	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
		void *page;
		dma_addr_t d;

		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (page == NULL)
			goto err;

		/* Map the whole page once; entries i and i+1 share it.  */
		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(d)) {
			free_page((unsigned long)page);
			goto err;
		}

		ep->rx_buf[i] = page;
		ep->descs->rdesc[i].buf_addr = d;
		ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;

		/* Second buffer on the same page, at offset PKT_BUF_SIZE.  */
		ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
		ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
		ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
	}

	for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
		void *page;
		dma_addr_t d;

		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (page == NULL)
			goto err;

		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(d)) {
			free_page((unsigned long)page);
			goto err;
		}

		ep->tx_buf[i] = page;
		ep->descs->tdesc[i].buf_addr = d;

		ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
		ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
	}

	return 0;

err:
	/* Partial allocations are torn down by the free routine, which
	 * tolerates NULL buffers / zero DMA addresses.  */
	ep93xx_free_buffers(ep);
	return 1;
}
|  | 535 |  | 
/*
 * Reset and program the MAC: ring base addresses/lengths, bus-master
 * enable, interrupt enables, station address and rx/tx enable.
 * Returns nonzero on failure (reset or rx engine never came up).
 */
static int ep93xx_start_hw(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	unsigned long addr;
	int i;

	/* Soft reset; the bit self-clears.  Poll up to ~10ms.  */
	wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n");
		return 1;
	}

	/* Program the MDC clock divisor (bits 9+ of SELFCTL).  */
	wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9));

	/* Does the PHY support preamble suppress?  */
	if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0)
		wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8));

	/* Receive descriptor ring.  */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc);
	wrl(ep, REG_RXDQBADD, addr);
	wrl(ep, REG_RXDCURADD, addr);
	wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc));

	/* Receive status ring.  */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat);
	wrl(ep, REG_RXSTSQBADD, addr);
	wrl(ep, REG_RXSTSQCURADD, addr);
	wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat));

	/* Transmit descriptor ring.  */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc);
	wrl(ep, REG_TXDQBADD, addr);
	wrl(ep, REG_TXDQCURADD, addr);
	wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc));

	/* Transmit status ring.  */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat);
	wrl(ep, REG_TXSTSQBADD, addr);
	wrl(ep, REG_TXSTSQCURADD, addr);
	wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat));

	wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX);
	wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
	/* Global interrupt stays masked until ep93xx_open() has the IRQ
	 * handler installed.  */
	wrl(ep, REG_GIINTMSK, 0);

	/* Wait (up to ~10ms) for the rx bus-master engine to start.  */
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to start\n");
		return 1;
	}

	/* Hand all rx descriptors/status slots to the hardware.  */
	wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES);
	wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES);

	/* Station MAC address, one byte per register.  */
	wrb(ep, REG_INDAD0, dev->dev_addr[0]);
	wrb(ep, REG_INDAD1, dev->dev_addr[1]);
	wrb(ep, REG_INDAD2, dev->dev_addr[2]);
	wrb(ep, REG_INDAD3, dev->dev_addr[3]);
	wrb(ep, REG_INDAD4, dev->dev_addr[4]);
	wrb(ep, REG_INDAD5, dev->dev_addr[5]);
	wrl(ep, REG_AFP, 0);

	wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE);

	wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT);
	wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE);

	return 0;
}
|  | 617 |  | 
|  | 618 | static void ep93xx_stop_hw(struct net_device *dev) | 
|  | 619 | { | 
|  | 620 | struct ep93xx_priv *ep = netdev_priv(dev); | 
|  | 621 | int i; | 
|  | 622 |  | 
|  | 623 | wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET); | 
|  | 624 | for (i = 0; i < 10; i++) { | 
|  | 625 | if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0) | 
|  | 626 | break; | 
|  | 627 | msleep(1); | 
|  | 628 | } | 
|  | 629 |  | 
|  | 630 | if (i == 10) | 
|  | 631 | printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n"); | 
|  | 632 | } | 
|  | 633 |  | 
/*
 * Bring the interface up: allocate DMA rings/buffers, start the MAC,
 * install the interrupt handler, then unmask the global interrupt and
 * start the tx queue.
 */
static int ep93xx_open(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int err;

	if (ep93xx_alloc_buffers(ep))
		return -ENOMEM;

	/* No MAC address supplied by the platform: make one up.  */
	if (is_zero_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
		printk(KERN_INFO "%s: generated random MAC address "
			"%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name,
			dev->dev_addr[0], dev->dev_addr[1],
			dev->dev_addr[2], dev->dev_addr[3],
			dev->dev_addr[4], dev->dev_addr[5]);
	}

	if (ep93xx_start_hw(dev)) {
		ep93xx_free_buffers(ep);
		return -EIO;
	}

	/* Reset software ring state to match the freshly reset MAC.  */
	spin_lock_init(&ep->rx_lock);
	ep->rx_pointer = 0;
	ep->tx_clean_pointer = 0;
	ep->tx_pointer = 0;
	spin_lock_init(&ep->tx_pending_lock);
	ep->tx_pending = 0;

	err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev);
	if (err) {
		ep93xx_stop_hw(dev);
		ep93xx_free_buffers(ep);
		return err;
	}

	/* Unmask the MAC's global interrupt only now that the handler
	 * is installed.  */
	wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE);

	netif_start_queue(dev);

	return 0;
}
|  | 676 |  | 
/*
 * Take the interface down: stop the tx queue, mask the MAC's global
 * interrupt, release the IRQ, reset the MAC and free all DMA buffers.
 * Mirrors ep93xx_open() in reverse order.
 */
static int ep93xx_close(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);

	netif_stop_queue(dev);

	wrl(ep, REG_GIINTMSK, 0);
	free_irq(ep->irq, dev);
	ep93xx_stop_hw(dev);
	ep93xx_free_buffers(ep);

	return 0;
}
|  | 690 |  | 
|  | 691 | static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 
|  | 692 | { | 
|  | 693 | struct ep93xx_priv *ep = netdev_priv(dev); | 
|  | 694 | struct mii_ioctl_data *data = if_mii(ifr); | 
|  | 695 |  | 
|  | 696 | return generic_mii_ioctl(&ep->mii, data, cmd, NULL); | 
|  | 697 | } | 
|  | 698 |  | 
|  | 699 | static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg) | 
|  | 700 | { | 
|  | 701 | struct ep93xx_priv *ep = netdev_priv(dev); | 
|  | 702 | int data; | 
|  | 703 | int i; | 
|  | 704 |  | 
|  | 705 | wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg); | 
|  | 706 |  | 
|  | 707 | for (i = 0; i < 10; i++) { | 
|  | 708 | if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) | 
|  | 709 | break; | 
|  | 710 | msleep(1); | 
|  | 711 | } | 
|  | 712 |  | 
|  | 713 | if (i == 10) { | 
|  | 714 | printk(KERN_INFO DRV_MODULE_NAME ": mdio read timed out\n"); | 
|  | 715 | data = 0xffff; | 
|  | 716 | } else { | 
|  | 717 | data = rdl(ep, REG_MIIDATA); | 
|  | 718 | } | 
|  | 719 |  | 
|  | 720 | return data; | 
|  | 721 | } | 
|  | 722 |  | 
|  | 723 | static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data) | 
|  | 724 | { | 
|  | 725 | struct ep93xx_priv *ep = netdev_priv(dev); | 
|  | 726 | int i; | 
|  | 727 |  | 
|  | 728 | wrl(ep, REG_MIIDATA, data); | 
|  | 729 | wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg); | 
|  | 730 |  | 
|  | 731 | for (i = 0; i < 10; i++) { | 
|  | 732 | if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) | 
|  | 733 | break; | 
|  | 734 | msleep(1); | 
|  | 735 | } | 
|  | 736 |  | 
|  | 737 | if (i == 10) | 
|  | 738 | printk(KERN_INFO DRV_MODULE_NAME ": mdio write timed out\n"); | 
|  | 739 | } | 
|  | 740 |  | 
|  | 741 | static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 
|  | 742 | { | 
|  | 743 | strcpy(info->driver, DRV_MODULE_NAME); | 
|  | 744 | strcpy(info->version, DRV_MODULE_VERSION); | 
|  | 745 | } | 
|  | 746 |  | 
|  | 747 | static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 
|  | 748 | { | 
|  | 749 | struct ep93xx_priv *ep = netdev_priv(dev); | 
|  | 750 | return mii_ethtool_gset(&ep->mii, cmd); | 
|  | 751 | } | 
|  | 752 |  | 
|  | 753 | static int ep93xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 
|  | 754 | { | 
|  | 755 | struct ep93xx_priv *ep = netdev_priv(dev); | 
|  | 756 | return mii_ethtool_sset(&ep->mii, cmd); | 
|  | 757 | } | 
|  | 758 |  | 
|  | 759 | static int ep93xx_nway_reset(struct net_device *dev) | 
|  | 760 | { | 
|  | 761 | struct ep93xx_priv *ep = netdev_priv(dev); | 
|  | 762 | return mii_nway_restart(&ep->mii); | 
|  | 763 | } | 
|  | 764 |  | 
|  | 765 | static u32 ep93xx_get_link(struct net_device *dev) | 
|  | 766 | { | 
|  | 767 | struct ep93xx_priv *ep = netdev_priv(dev); | 
|  | 768 | return mii_link_ok(&ep->mii); | 
|  | 769 | } | 
|  | 770 |  | 
/* ethtool operations supported by this driver.  */
static struct ethtool_ops ep93xx_ethtool_ops = {
	.get_drvinfo		= ep93xx_get_drvinfo,
	.get_settings		= ep93xx_get_settings,
	.set_settings		= ep93xx_set_settings,
	.nway_reset		= ep93xx_nway_reset,
	.get_link		= ep93xx_get_link,
};
|  | 778 |  | 
|  | 779 | struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) | 
|  | 780 | { | 
|  | 781 | struct net_device *dev; | 
| Lennert Buytenhek | 1d22e05 | 2006-09-22 02:28:13 +0200 | [diff] [blame] | 782 |  | 
|  | 783 | dev = alloc_etherdev(sizeof(struct ep93xx_priv)); | 
|  | 784 | if (dev == NULL) | 
|  | 785 | return NULL; | 
| Lennert Buytenhek | 1d22e05 | 2006-09-22 02:28:13 +0200 | [diff] [blame] | 786 |  | 
|  | 787 | memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN); | 
|  | 788 |  | 
|  | 789 | dev->get_stats = ep93xx_get_stats; | 
|  | 790 | dev->ethtool_ops = &ep93xx_ethtool_ops; | 
|  | 791 | dev->poll = ep93xx_poll; | 
|  | 792 | dev->hard_start_xmit = ep93xx_xmit; | 
|  | 793 | dev->open = ep93xx_open; | 
|  | 794 | dev->stop = ep93xx_close; | 
|  | 795 | dev->do_ioctl = ep93xx_ioctl; | 
|  | 796 |  | 
|  | 797 | dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; | 
|  | 798 | dev->weight = 64; | 
|  | 799 |  | 
|  | 800 | return dev; | 
|  | 801 | } | 
|  | 802 |  | 
|  | 803 |  | 
|  | 804 | static int ep93xx_eth_remove(struct platform_device *pdev) | 
|  | 805 | { | 
|  | 806 | struct net_device *dev; | 
|  | 807 | struct ep93xx_priv *ep; | 
|  | 808 |  | 
|  | 809 | dev = platform_get_drvdata(pdev); | 
|  | 810 | if (dev == NULL) | 
|  | 811 | return 0; | 
|  | 812 | platform_set_drvdata(pdev, NULL); | 
|  | 813 |  | 
|  | 814 | ep = netdev_priv(dev); | 
|  | 815 |  | 
|  | 816 | /* @@@ Force down.  */ | 
|  | 817 | unregister_netdev(dev); | 
|  | 818 | ep93xx_free_buffers(ep); | 
|  | 819 |  | 
|  | 820 | if (ep->base_addr != NULL) | 
|  | 821 | iounmap(ep->base_addr); | 
|  | 822 |  | 
|  | 823 | if (ep->res != NULL) { | 
|  | 824 | release_resource(ep->res); | 
|  | 825 | kfree(ep->res); | 
|  | 826 | } | 
|  | 827 |  | 
|  | 828 | free_netdev(dev); | 
|  | 829 |  | 
|  | 830 | return 0; | 
|  | 831 | } | 
|  | 832 |  | 
|  | 833 | static int ep93xx_eth_probe(struct platform_device *pdev) | 
|  | 834 | { | 
|  | 835 | struct ep93xx_eth_data *data; | 
|  | 836 | struct net_device *dev; | 
|  | 837 | struct ep93xx_priv *ep; | 
|  | 838 | int err; | 
|  | 839 |  | 
| Lennert Buytenhek | 1d22e05 | 2006-09-22 02:28:13 +0200 | [diff] [blame] | 840 | if (pdev == NULL) | 
|  | 841 | return -ENODEV; | 
| Yan Burman | ebf5112 | 2006-12-19 13:08:48 -0800 | [diff] [blame] | 842 | data = pdev->dev.platform_data; | 
| Lennert Buytenhek | 1d22e05 | 2006-09-22 02:28:13 +0200 | [diff] [blame] | 843 |  | 
|  | 844 | dev = ep93xx_dev_alloc(data); | 
|  | 845 | if (dev == NULL) { | 
|  | 846 | err = -ENOMEM; | 
|  | 847 | goto err_out; | 
|  | 848 | } | 
|  | 849 | ep = netdev_priv(dev); | 
|  | 850 |  | 
|  | 851 | platform_set_drvdata(pdev, dev); | 
|  | 852 |  | 
|  | 853 | ep->res = request_mem_region(pdev->resource[0].start, | 
|  | 854 | pdev->resource[0].end - pdev->resource[0].start + 1, | 
|  | 855 | pdev->dev.bus_id); | 
|  | 856 | if (ep->res == NULL) { | 
|  | 857 | dev_err(&pdev->dev, "Could not reserve memory region\n"); | 
|  | 858 | err = -ENOMEM; | 
|  | 859 | goto err_out; | 
|  | 860 | } | 
|  | 861 |  | 
|  | 862 | ep->base_addr = ioremap(pdev->resource[0].start, | 
|  | 863 | pdev->resource[0].end - pdev->resource[0].start); | 
|  | 864 | if (ep->base_addr == NULL) { | 
|  | 865 | dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); | 
|  | 866 | err = -EIO; | 
|  | 867 | goto err_out; | 
|  | 868 | } | 
|  | 869 | ep->irq = pdev->resource[1].start; | 
|  | 870 |  | 
|  | 871 | ep->mii.phy_id = data->phy_id; | 
|  | 872 | ep->mii.phy_id_mask = 0x1f; | 
|  | 873 | ep->mii.reg_num_mask = 0x1f; | 
|  | 874 | ep->mii.dev = dev; | 
|  | 875 | ep->mii.mdio_read = ep93xx_mdio_read; | 
|  | 876 | ep->mii.mdio_write = ep93xx_mdio_write; | 
|  | 877 | ep->mdc_divisor = 40;	/* Max HCLK 100 MHz, min MDIO clk 2.5 MHz.  */ | 
|  | 878 |  | 
|  | 879 | err = register_netdev(dev); | 
|  | 880 | if (err) { | 
|  | 881 | dev_err(&pdev->dev, "Failed to register netdev\n"); | 
|  | 882 | goto err_out; | 
|  | 883 | } | 
|  | 884 |  | 
|  | 885 | printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, " | 
|  | 886 | "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name, | 
|  | 887 | ep->irq, data->dev_addr[0], data->dev_addr[1], | 
|  | 888 | data->dev_addr[2], data->dev_addr[3], | 
|  | 889 | data->dev_addr[4], data->dev_addr[5]); | 
|  | 890 |  | 
|  | 891 | return 0; | 
|  | 892 |  | 
|  | 893 | err_out: | 
|  | 894 | ep93xx_eth_remove(pdev); | 
|  | 895 | return err; | 
|  | 896 | } | 
|  | 897 |  | 
|  | 898 |  | 
/* Platform-driver glue; matches platform devices named "ep93xx-eth".  */
static struct platform_driver ep93xx_eth_driver = {
	.probe		= ep93xx_eth_probe,
	.remove		= ep93xx_eth_remove,
	.driver		= {
		.name	= "ep93xx-eth",
	},
};
|  | 906 |  | 
|  | 907 | static int __init ep93xx_eth_init_module(void) | 
|  | 908 | { | 
|  | 909 | printk(KERN_INFO DRV_MODULE_NAME " version " DRV_MODULE_VERSION " loading\n"); | 
|  | 910 | return platform_driver_register(&ep93xx_eth_driver); | 
|  | 911 | } | 
|  | 912 |  | 
|  | 913 | static void __exit ep93xx_eth_cleanup_module(void) | 
|  | 914 | { | 
|  | 915 | platform_driver_unregister(&ep93xx_eth_driver); | 
|  | 916 | } | 
|  | 917 |  | 
|  | 918 | module_init(ep93xx_eth_init_module); | 
|  | 919 | module_exit(ep93xx_eth_cleanup_module); | 
|  | 920 | MODULE_LICENSE("GPL"); |