/*
 * rionet - Ethernet driver over RapidIO messaging services
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>

#define DRV_NAME        "rionet"
#define DRV_VERSION     "0.2"
#define DRV_AUTHOR      "Matt Porter <mporter@kernel.crashing.org>"
#define DRV_DESC        "Ethernet over RapidIO"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

#define RIONET_DEFAULT_MSGLEVEL \
			(NETIF_MSG_DRV          | \
			 NETIF_MSG_LINK         | \
			 NETIF_MSG_RX_ERR       | \
			 NETIF_MSG_TX_ERR)

#define RIONET_DOORBELL_JOIN	0x1000
#define RIONET_DOORBELL_LEAVE	0x1001

#define RIONET_MAILBOX		0

#define RIONET_TX_RING_SIZE	CONFIG_RIONET_TX_SIZE
#define RIONET_RX_RING_SIZE	CONFIG_RIONET_RX_SIZE
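/*
 * The ring sizes come from the CONFIG_RIONET_TX_SIZE and
 * CONFIG_RIONET_RX_SIZE Kconfig options. The TX slot/ack bookkeeping
 * below wraps with "& (RIONET_TX_RING_SIZE - 1)", so the TX ring size
 * is expected to be a power of two.
 */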

static LIST_HEAD(rionet_peers);

struct rionet_private {
	struct rio_mport *mport;
	struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];
	struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];
	int rx_slot;
	int tx_slot;
	int tx_cnt;
	int ack_slot;
	spinlock_t lock;
	spinlock_t tx_lock;
	u32 msg_enable;
};

struct rionet_peer {
	struct list_head node;
	struct rio_dev *rdev;
	struct resource *res;
};

static int rionet_check = 0;
static int rionet_capable = 1;

/*
 * This is a fast lookup table for translating TX
 * Ethernet packets into a destination RIO device. It
 * could be made into a hash table to save memory depending
 * on system trade-offs.
 */
static struct rio_dev **rionet_active;

#define is_rionet_capable(pef, src_ops, dst_ops)		\
			((pef & RIO_PEF_INB_MBOX) &&		\
			 (pef & RIO_PEF_INB_DOORBELL) &&	\
			 (src_ops & RIO_SRC_OPS_DOORBELL) &&	\
			 (dst_ops & RIO_DST_OPS_DOORBELL))
#define dev_rionet_capable(dev) \
	is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops)

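/*
 * rionet uses a synthetic MAC address of the form 00:01:00:01:dd:dd,
 * where dd:dd is the 16-bit RapidIO destination ID (see the address
 * assignment in rionet_setup_netdev()). RIONET_MAC_MATCH() checks the
 * fixed 00:01:00:01 prefix and RIONET_GET_DESTID() recovers the destid
 * from the last two bytes.
 */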
#define RIONET_MAC_MATCH(x)	(*(u32 *)x == 0x00010001)
#define RIONET_GET_DESTID(x)	(*(u16 *)(x + 4))

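/*
 * Drain completed inbound messages: attach each received buffer to its
 * ring skb, hand the skb to the network stack with netif_rx(), and
 * update the interface statistics. Returns the slot index where
 * cleaning stopped, which rionet_inb_msg_event() uses as the end
 * marker for refilling the ring.
 */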
static int rionet_rx_clean(struct net_device *ndev)
{
	int i;
	int error = 0;
	struct rionet_private *rnet = netdev_priv(ndev);
	void *data;

	i = rnet->rx_slot;

	do {
		if (!rnet->rx_skb[i])
			continue;

		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
			break;

		rnet->rx_skb[i]->data = data;
		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
		rnet->rx_skb[i]->protocol =
		    eth_type_trans(rnet->rx_skb[i], ndev);
		error = netif_rx(rnet->rx_skb[i]);

		if (error == NET_RX_DROP) {
			ndev->stats.rx_dropped++;
		} else if (error == NET_RX_BAD) {
			if (netif_msg_rx_err(rnet))
				printk(KERN_WARNING "%s: bad rx packet\n",
				       DRV_NAME);
			ndev->stats.rx_errors++;
		} else {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE;
		}

	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);

	return i;
}

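/*
 * Refill the inbound ring from rx_slot up to (but not including) 'end':
 * allocate a fresh skb for each slot and post its data buffer to the
 * inbound mailbox.
 */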
static void rionet_rx_fill(struct net_device *ndev, int end)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);

	i = rnet->rx_slot;
	do {
		rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);

		if (!rnet->rx_skb[i])
			break;

		rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
				   rnet->rx_skb[i]->data);
	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);

	rnet->rx_slot = i;
}

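/*
 * Post one skb to the outbound mailbox for the given RapidIO device,
 * remember it in the TX ring until the outbound-message event acks it,
 * and stop the queue when the ring fills up.
 */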
static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
			       struct rio_dev *rdev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
	rnet->tx_skb[rnet->tx_slot] = skb;

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
		netif_stop_queue(ndev);

	++rnet->tx_slot;
	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);

	if (netif_msg_tx_queued(rnet))
		printk(KERN_INFO "%s: queued skb %8.8x len %8.8x\n", DRV_NAME,
		       (u32) skb, skb->len);

	return 0;
}

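/*
 * hard_start_xmit handler. Multicast/broadcast frames (group bit set in
 * the destination MAC) are replicated to every active peer; unicast
 * frames matching the rionet MAC prefix go to the peer whose destid is
 * encoded in the destination address.
 */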
static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 destid;
	unsigned long flags;

	local_irq_save(flags);
	if (!spin_trylock(&rnet->tx_lock)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

	if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
		netif_stop_queue(ndev);
		spin_unlock_irqrestore(&rnet->tx_lock, flags);
		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
		       ndev->name);
		return NETDEV_TX_BUSY;
	}

	if (eth->h_dest[0] & 0x01) {
		for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
				i++)
			if (rionet_active[i])
				rionet_queue_tx_msg(skb, ndev,
						    rionet_active[i]);
	} else if (RIONET_MAC_MATCH(eth->h_dest)) {
		destid = RIONET_GET_DESTID(eth->h_dest);
		if (rionet_active[destid])
			rionet_queue_tx_msg(skb, ndev, rionet_active[destid]);
	}

	spin_unlock_irqrestore(&rnet->tx_lock, flags);

	return 0;
}

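/*
 * Inbound doorbell handler. A JOIN doorbell marks the sending peer as
 * active (and is echoed back so the peer learns about us); a LEAVE
 * doorbell removes the peer from the active table.
 */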
static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
			       u16 info)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
		       DRV_NAME, sid, tid, info);
	if (info == RIONET_DOORBELL_JOIN) {
		if (!rionet_active[sid]) {
			list_for_each_entry(peer, &rionet_peers, node) {
				if (peer->rdev->destid == sid)
					rionet_active[sid] = peer->rdev;
			}
			rio_mport_send_doorbell(mport, sid,
						RIONET_DOORBELL_JOIN);
		}
	} else if (info == RIONET_DOORBELL_LEAVE) {
		rionet_active[sid] = NULL;
	} else {
		if (netif_msg_intr(rnet))
			printk(KERN_WARNING "%s: unhandled doorbell\n",
			       DRV_NAME);
	}
}

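/*
 * Inbound message event: clean completed receives and refill the
 * inbound ring under rnet->lock.
 */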
static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	int n;
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	spin_lock(&rnet->lock);
	if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
		rionet_rx_fill(ndev, n);
	spin_unlock(&rnet->lock);
}

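/*
 * Outbound message event: free the skbs for transmitted slots up to
 * the reported slot, then wake the queue if ring space was freed.
 */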
static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	spin_lock(&rnet->lock);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO
		       "%s: outbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
		/* dma unmap single */
		dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
		rnet->tx_skb[rnet->ack_slot] = NULL;
		++rnet->ack_slot;
		rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
		rnet->tx_cnt--;
	}

	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
		netif_wake_queue(ndev);

	spin_unlock(&rnet->lock);
}

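/*
 * Bring the interface up: claim the inbound doorbell range and the
 * inbound/outbound mailboxes, prime the receive ring, and send a JOIN
 * doorbell to every known peer whose doorbells are initialized.
 */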
static int rionet_open(struct net_device *ndev)
{
	int i, rc = 0;
	struct rionet_peer *peer, *tmp;
	u32 pwdcsr;
	struct rionet_private *rnet = netdev_priv(ndev);

	if (netif_msg_ifup(rnet))
		printk(KERN_INFO "%s: open\n", DRV_NAME);

	if ((rc = rio_request_inb_dbell(rnet->mport,
					(void *)ndev,
					RIONET_DOORBELL_JOIN,
					RIONET_DOORBELL_LEAVE,
					rionet_dbell_event)) < 0)
		goto out;

	if ((rc = rio_request_inb_mbox(rnet->mport,
				       (void *)ndev,
				       RIONET_MAILBOX,
				       RIONET_RX_RING_SIZE,
				       rionet_inb_msg_event)) < 0)
		goto out;

	if ((rc = rio_request_outb_mbox(rnet->mport,
					(void *)ndev,
					RIONET_MAILBOX,
					RIONET_TX_RING_SIZE,
					rionet_outb_msg_event)) < 0)
		goto out;

	/* Initialize inbound message ring */
	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		rnet->rx_skb[i] = NULL;
	rnet->rx_slot = 0;
	rionet_rx_fill(ndev, 0);

	rnet->tx_slot = 0;
	rnet->tx_cnt = 0;
	rnet->ack_slot = 0;

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
		if (!(peer->res = rio_request_outb_dbell(peer->rdev,
							 RIONET_DOORBELL_JOIN,
							 RIONET_DOORBELL_LEAVE))) {
			printk(KERN_ERR "%s: error requesting doorbells\n",
			       DRV_NAME);
			continue;
		}

		/*
		 * If device has initialized inbound doorbells,
		 * send a join message
		 */
		rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
		if (pwdcsr & RIO_DOORBELL_AVAIL)
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}

out:
	return rc;
}

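/*
 * Take the interface down: free the receive ring, tell every active
 * peer we are leaving, and release the doorbell and mailbox resources.
 */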
static int rionet_close(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer, *tmp;
	int i;

	if (netif_msg_ifdown(rnet))
		printk(KERN_INFO "%s: close\n", DRV_NAME);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		kfree_skb(rnet->rx_skb[i]);

	list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
		if (rionet_active[peer->rdev->destid]) {
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
			rionet_active[peer->rdev->destid] = NULL;
		}
		rio_release_outb_dbell(peer->rdev, peer->res);
	}

	rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
			      RIONET_DOORBELL_LEAVE);
	rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
	rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);

	return 0;
}

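/*
 * rio_driver remove callback: free the rionet_active lookup table,
 * unregister the net_device, and tear down the peer list.
 *
 * FIXME: ndev is never assigned from rdev here before it is passed to
 * unregister_netdev() and kfree().
 */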
static void rionet_remove(struct rio_dev *rdev)
{
	struct net_device *ndev = NULL;
	struct rionet_peer *peer, *tmp;

	free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
		   __ilog2(sizeof(void *)) + 4 : 0);
	unregister_netdev(ndev);
	kfree(ndev);

	list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
		list_del(&peer->node);
		kfree(peer);
	}
}

static void rionet_get_drvinfo(struct net_device *ndev,
			       struct ethtool_drvinfo *info)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "n/a");
	strcpy(info->bus_info, rnet->mport->name);
}

static u32 rionet_get_msglevel(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	return rnet->msg_enable;
}

static void rionet_set_msglevel(struct net_device *ndev, u32 value)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rnet->msg_enable = value;
}

static const struct ethtool_ops rionet_ethtool_ops = {
	.get_drvinfo = rionet_get_drvinfo,
	.get_msglevel = rionet_get_msglevel,
	.set_msglevel = rionet_set_msglevel,
	.get_link = ethtool_op_get_link,
};

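/*
 * Allocate and register the rionet net_device for a master port:
 * allocate the rionet_active lookup table, derive the MAC address from
 * the local device ID, and hook up the driver entry points.
 */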
static int rionet_setup_netdev(struct rio_mport *mport)
{
	int rc = 0;
	struct net_device *ndev = NULL;
	struct rionet_private *rnet;
	u16 device_id;

	/* Allocate our net_device structure */
	ndev = alloc_etherdev(sizeof(struct rionet_private));
	if (ndev == NULL) {
		printk(KERN_INFO "%s: could not allocate ethernet device.\n",
		       DRV_NAME);
		rc = -ENOMEM;
		goto out;
	}

	rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
			mport->sys_size ? __ilog2(sizeof(void *)) + 4 : 0);
	if (!rionet_active) {
		rc = -ENOMEM;
		goto out;
	}
	memset((void *)rionet_active, 0, sizeof(void *) *
	       RIO_MAX_ROUTE_ENTRIES(mport->sys_size));

	/* Set up private area */
	rnet = netdev_priv(ndev);
	rnet->mport = mport;

	/* Set the default MAC address */
	device_id = rio_local_get_device_id(mport);
	ndev->dev_addr[0] = 0x00;
	ndev->dev_addr[1] = 0x01;
	ndev->dev_addr[2] = 0x00;
	ndev->dev_addr[3] = 0x01;
	ndev->dev_addr[4] = device_id >> 8;
	ndev->dev_addr[5] = device_id & 0xff;

	/* Fill in the driver function table */
	ndev->open = &rionet_open;
	ndev->hard_start_xmit = &rionet_start_xmit;
	ndev->stop = &rionet_close;
	ndev->mtu = RIO_MAX_MSG_SIZE - 14;
	ndev->features = NETIF_F_LLTX;
	SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);

	spin_lock_init(&rnet->lock);
	spin_lock_init(&rnet->tx_lock);

	rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;

	rc = register_netdev(ndev);
	if (rc != 0)
		goto out;

	printk("%s: %s %s Version %s, MAC %pM\n",
	       ndev->name,
	       DRV_NAME,
	       DRV_DESC,
	       DRV_VERSION,
	       ndev->dev_addr);

out:
	return rc;
}

/*
 * XXX Make multi-net safe
 */
static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	int rc = -ENODEV;
	u32 lpef, lsrc_ops, ldst_ops;
	struct rionet_peer *peer;

	/* If local device is not rionet capable, give up quickly */
	if (!rionet_capable)
		goto out;

	/*
	 * First time through, make sure local device is rionet
	 * capable, setup netdev, and set flags so this is skipped
	 * on later probes
	 */
	if (!rionet_check) {
		rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
		rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
					 &lsrc_ops);
		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
					 &ldst_ops);
		if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) {
			printk(KERN_ERR
			       "%s: local device is not network capable\n",
			       DRV_NAME);
			rionet_check = 1;
			rionet_capable = 0;
			goto out;
		}

		rc = rionet_setup_netdev(rdev->net->hport);
		rionet_check = 1;
	}

	/*
	 * If the remote device has mailbox/doorbell capabilities,
	 * add it to the peer list.
	 */
	if (dev_rionet_capable(rdev)) {
		if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) {
			rc = -ENOMEM;
			goto out;
		}
		peer->rdev = rdev;
		list_add_tail(&peer->node, &rionet_peers);
	}

out:
	return rc;
}

static struct rio_device_id rionet_id_table[] = {
	{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)}
};

static struct rio_driver rionet_driver = {
	.name = "rionet",
	.id_table = rionet_id_table,
	.probe = rionet_probe,
	.remove = rionet_remove,
};

static int __init rionet_init(void)
{
	return rio_register_driver(&rionet_driver);
}

static void __exit rionet_exit(void)
{
	rio_unregister_driver(&rionet_driver);
}

module_init(rionet_init);
module_exit(rionet_exit);