/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet.h"

#define DRV_MODULE_NAME		"sunvnet"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
	{ .major = 1, .minor = 0 },
};

static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}

static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

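/* Send our half of the VIO attribute exchange: dring transfer mode,
 * our MAC address packed into the low six bytes of a u64, an ack
 * frequency of zero and an MTU of ETH_FRAME_LEN.  handle_attr_info()
 * below applies the same constraints to the peer's attributes.
 */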
static int vnet_send_attr(struct vio_driver_state *vio)
{
	struct vnet_port *port = to_vnet_port(vio);
	struct net_device *dev = port->vp->dev;
	struct vio_net_attr_info pkt;
	int i;

	memset(&pkt, 0, sizeof(pkt));
	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);
	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.addr_type = VNET_ADDR_ETHERMAC;
	pkt.ack_freq = 0;
	for (i = 0; i < 6; i++)
		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
	pkt.mtu = ETH_FRAME_LEN;

	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] mtu[%llu]\n",
	       pkt.xfer_mode, pkt.addr_type,
	       (unsigned long long) pkt.addr,
	       pkt.ack_freq,
	       (unsigned long long) pkt.mtu);

	return vio_ldc_send(vio, &pkt, sizeof(pkt));
}

static int handle_attr_info(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] mtu[%llu]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long) pkt->addr,
	       pkt->ack_freq,
	       (unsigned long long) pkt->mtu);

	pkt->tag.sid = vio_send_sid(vio);

	if (pkt->xfer_mode != VIO_DRING_MODE ||
	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
	    pkt->mtu != ETH_FRAME_LEN) {
		viodbg(HS, "SEND NET ATTR NACK\n");

		pkt->tag.stype = VIO_SUBTYPE_NACK;

		(void) vio_ldc_send(vio, pkt, sizeof(*pkt));

		return -ECONNRESET;
	} else {
		viodbg(HS, "SEND NET ATTR ACK\n");

		pkt->tag.stype = VIO_SUBTYPE_ACK;

		return vio_ldc_send(vio, pkt, sizeof(*pkt));
	}

}

static int handle_attr_ack(struct vio_driver_state *vio,
			   struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR ACK\n");

	return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR NACK\n");

	return -ECONNRESET;
}

static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vio_net_attr_info *pkt = arg;

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return handle_attr_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return handle_attr_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return handle_attr_nack(vio, pkt);

	default:
		return -ECONNRESET;
	}
}

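/* Once a handshake finishes, both dring sequence counters restart at
 * 1 so the first data message on the fresh session is in sequence.
 */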
static void vnet_handshake_complete(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;

	dr = &vio->drings[VIO_DRIVER_TX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;
}

/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly.  These 6 bytes are not in the descriptor
 * length, they are simply implied.  This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
					   unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
	unsigned long addr, off;

	if (unlikely(!skb))
		return NULL;

	addr = (unsigned long) skb->data;
	off = ((addr + 7UL) & ~7UL) - addr;
	if (off)
		skb_reserve(skb, off);

	return skb;
}

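/* Receive one packet described by a single RX descriptor: allocate an
 * aligned skb, pull the payload in from the peer with ldc_copy(),
 * strip the VNET_PACKET_SKIP pad bytes, trim to the real length and
 * hand the frame to the stack.
 */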
static int vnet_rx_one(struct vnet_port *port, unsigned int len,
		       struct ldc_trans_cookie *cookies, int ncookies)
{
	struct net_device *dev = port->vp->dev;
	unsigned int copy_len;
	struct sk_buff *skb;
	int err;

	err = -EMSGSIZE;
	if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) {
		dev->stats.rx_length_errors++;
		goto out_dropped;
	}

	skb = alloc_and_align_skb(dev, len);
	err = -ENOMEM;
	if (unlikely(!skb)) {
		dev->stats.rx_missed_errors++;
		goto out_dropped;
	}

	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
	skb_put(skb, copy_len);
	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
		       skb->data, copy_len, 0,
		       cookies, ncookies);
	if (unlikely(err < 0)) {
		dev->stats.rx_frame_errors++;
		goto out_free_skb;
	}

	skb_pull(skb, VNET_PACKET_SKIP);
	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	netif_rx(skb);

	return 0;

out_free_skb:
	kfree_skb(skb);

out_dropped:
	dev->stats.rx_dropped++;
	return err;
}

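/* ACK a range of RX descriptors back to the transmitter.  vio_ldc_send()
 * returns -EAGAIN while the LDC transmit queue is full, so retry with
 * an exponential backoff capped at 128 microseconds.
 */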
static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
			 u32 start, u32 end, u8 vio_dring_state)
{
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_ACK,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= start,
		.end_idx		= end,
		.state			= vio_dring_state,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}

static u32 next_idx(u32 idx, struct vio_dring_state *dr)
{
	if (++idx == dr->num_entries)
		idx = 0;
	return idx;
}

static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
{
	if (idx == 0)
		idx = dr->num_entries - 1;
	else
		idx--;

	return idx;
}

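/* RX descriptors are fetched and written back one at a time with the
 * LDC dring-entry copy calls, using the cookies registered for this
 * ring, rather than being dereferenced directly.
 */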
static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}

static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}

static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	if (IS_ERR(desc))
		return PTR_ERR(desc);

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%lx:%lx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;
	err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
	if (err == -ECONNRESET)
		return err;
	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;
	*needs_ack = desc->hdr.ack;
	return 0;
}

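/* Walk the descriptor range handed to us in a dring data message.
 * Descriptors that ask for it get a VIO_DRING_ACTIVE ack mid-walk, and
 * a final VIO_DRING_STOPPED ack closes out the range once we stop,
 * either at the end or at the first descriptor that is not READY.
 */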
static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end)
{
	struct vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;

	end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = next_idx(start, dr);
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
	}
	if (unlikely(ack_start == -1))
		ack_start = ack_end = prev_idx(start, dr);
	return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
}

static int vnet_rx(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016lx] rcv_nxt[%016lx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		printk(KERN_ERR PFX "RX out of sequence seq[0x%lx] "
		       "rcv_nxt[0x%lx]\n", pkt->seq, dr->rcv_nxt);
		return 0;
	}

	dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
}

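/* Return true if 'end' lies inside the region of our TX ring that has
 * been submitted but not yet acked, i.e. between the consumer and
 * producer indexes.
 */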
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = next_idx(idx, dr);
	}
	return found;
}

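/* The peer acked TX descriptors up to end_idx; advance our consumer
 * index past it and, if the queue is stopped and enough ring space
 * has opened up, return 1 so the caller wakes the device queue.
 */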
static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	struct vnet *vp;
	u32 end;

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	if (unlikely(!idx_is_pending(dr, end)))
		return 0;

	dr->cons = next_idx(end, dr);

	vp = port->vp;
	dev = vp->dev;
	if (unlikely(netif_queue_stopped(dev) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}

static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		printk(KERN_ERR PFX "%s: Got unexpected MCAST reply "
		       "[%02x:%02x:%04x:%08x]\n",
		       port->vp->dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}

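/* Called after the peer acks TX descriptors.  One device queue feeds
 * every port, so only wake it once all ports have at least
 * VNET_TX_WAKEUP_THRESH() free TX ring entries.
 */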
static void maybe_tx_wakeup(struct vnet *vp)
{
	struct net_device *dev = vp->dev;

	netif_tx_lock(dev);
	if (likely(netif_queue_stopped(dev))) {
		struct vnet_port *port;
		int wake = 1;

		list_for_each_entry(port, &vp->port_list, list) {
			struct vio_dring_state *dr;

			dr = &port->vio.drings[VIO_DRIVER_TX_RING];
			if (vnet_tx_dring_avail(dr) <
			    VNET_TX_WAKEUP_THRESH(dr)) {
				wake = 0;
				break;
			}
		}
		if (wake)
			netif_wake_queue(dev);
	}
	netif_tx_unlock(dev);
}

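/* LDC event callback for a port.  RESET and UP are handed to the
 * generic VIO link state machine (with vio_port_up() re-arming the
 * channel after a reset); DATA_READY drains the LDC receive queue,
 * dispatching data, control and unknown packets until the queue is
 * empty or the connection resets.  The vio lock is dropped (with
 * interrupts still disabled) before calling maybe_tx_wakeup(), which
 * takes netif_tx_lock() itself.
 */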
static void vnet_event(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int tx_wakeup, err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		spin_unlock_irqrestore(&vio->lock, flags);

		if (event == LDC_EVENT_RESET)
			vio_port_up(vio);
		return;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	tx_wakeup = err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				err = vnet_rx(port, &msgbuf);
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	spin_unlock(&vio->lock);
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port->vp);
	local_irq_restore(flags);
}

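/* Kick the peer after publishing a new TX descriptor by sending a
 * dring data INFO message pointing at dr->prod, with the same
 * -EAGAIN retry and backoff scheme as vnet_send_ack().
 */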
static int __vnet_tx_trigger(struct vnet_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= dr->prod,
		.end_idx		= (u32) -1,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}

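/* Pick the egress port for a packet: hash the destination MAC and look
 * for an exact match on a port's remote MAC, falling back to the first
 * port on the list (the switch port is always added at the head).
 */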
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	unsigned int hash = vnet_hashfn(skb->data);
	struct hlist_head *hp = &vp->port_hash[hash];
	struct hlist_node *n;
	struct vnet_port *port;

	hlist_for_each_entry(port, n, hp, hash) {
		if (!compare_ether_addr(port->raddr, skb->data))
			return port;
	}
	port = NULL;
	if (!list_empty(&vp->port_list))
		port = list_entry(vp->port_list.next, struct vnet_port, list);

	return port;
}

struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	struct vnet_port *ret;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	ret = __tx_port_find(vp, skb);
	spin_unlock_irqrestore(&vp->lock, flags);

	return ret;
}

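/* Transmit path: copy the skb into the pre-mapped bounce buffer behind
 * the next TX descriptor (padding short frames to ETH_ZLEN), publish
 * the descriptor as READY and trigger the peer.  Runs under the
 * per-port vio lock; the queue is stopped whenever fewer than two
 * descriptors remain free.
 */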
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = tx_port_find(vp, skb);
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned long flags;
	unsigned int len;
	void *tx_buf;
	int i, err;

	if (unlikely(!port))
		goto out_dropped;

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
			dev->stats.tx_errors++;
		}
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	tx_buf = port->tx_bufs[dr->prod].buf;
	skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);

	len = skb->len;
	if (len < ETH_ZLEN) {
		len = ETH_ZLEN;
		memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
	}

	d->hdr.ack = VIO_ACK_ENABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[dr->prod].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();

	d->hdr.state = VIO_DESC_READY;

	err = __vnet_tx_trigger(port);
	if (unlikely(err < 0)) {
		printk(KERN_INFO PFX "%s: TX trigger error %d\n",
		       dev->name, err);
		d->hdr.state = VIO_DESC_FREE;
		dev->stats.tx_carrier_errors++;
		goto out_dropped_unlock;
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		netif_stop_queue(dev);
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_wake_queue(dev);
	}

	spin_unlock_irqrestore(&port->vio.lock, flags);

	dev_kfree_skb(skb);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;

out_dropped_unlock:
	spin_unlock_irqrestore(&port->vio.lock, flags);

out_dropped:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void vnet_tx_timeout(struct net_device *dev)
{
	/* XXX Implement me XXX */
}

static int vnet_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_start_queue(dev);

	return 0;
}

static int vnet_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	return 0;
}

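/* Multicast filtering.  The local list mirrors dev->mc_list; entries
 * added since the last sync are pushed to the switch port in set=1
 * messages, and entries that have disappeared are flushed in set=0
 * messages, batched VNET_NUM_MCAST addresses at a time.
 */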
static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
	struct vnet_mcast_entry *m;

	for (m = vp->mcast_list; m; m = m->next) {
		if (!memcmp(m->addr, addr, ETH_ALEN))
			return m;
	}
	return NULL;
}

static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
	struct dev_addr_list *p;

	for (p = dev->mc_list; p; p = p->next) {
		struct vnet_mcast_entry *m;

		m = __vnet_mc_find(vp, p->dmi_addr);
		if (m) {
			m->hit = 1;
			continue;
		}

		if (!m) {
			m = kzalloc(sizeof(*m), GFP_ATOMIC);
			if (!m)
				continue;
			memcpy(m->addr, p->dmi_addr, ETH_ALEN);
			m->hit = 1;

			m->next = vp->mcast_list;
			vp->mcast_list = m;
		}
	}
}

static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
	struct vio_net_mcast_info info;
	struct vnet_mcast_entry *m, **pp;
	int n_addrs;

	memset(&info, 0, sizeof(info));

	info.tag.type = VIO_TYPE_CTRL;
	info.tag.stype = VIO_SUBTYPE_INFO;
	info.tag.stype_env = VNET_MCAST_INFO;
	info.tag.sid = vio_send_sid(&port->vio);
	info.set = 1;

	n_addrs = 0;
	for (m = vp->mcast_list; m; m = m->next) {
		if (m->sent)
			continue;
		m->sent = 1;
		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;

			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}

	info.set = 0;

	n_addrs = 0;
	pp = &vp->mcast_list;
	while ((m = *pp) != NULL) {
		if (m->hit) {
			m->hit = 0;
			pp = &m->next;
			continue;
		}

		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;
			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}

		*pp = m->next;
		kfree(m);
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}
}

static void vnet_set_rx_mode(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	if (!list_empty(&vp->port_list)) {
		port = list_entry(vp->port_list.next, struct vnet_port, list);

		if (port->switch_port) {
			__update_mc_list(vp, dev);
			__send_mc_list(vp, port);
		}
	}
	spin_unlock_irqrestore(&vp->lock, flags);
}

static int vnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu != ETH_DATA_LEN)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static int vnet_set_mac_addr(struct net_device *dev, void *p)
{
	return -EINVAL;
}

static void vnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static u32 vnet_get_msglevel(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	return vp->msg_enable;
}

static void vnet_set_msglevel(struct net_device *dev, u32 value)
{
	struct vnet *vp = netdev_priv(dev);
	vp->msg_enable = value;
}

static const struct ethtool_ops vnet_ethtool_ops = {
	.get_drvinfo		= vnet_get_drvinfo,
	.get_msglevel		= vnet_get_msglevel,
	.set_msglevel		= vnet_set_msglevel,
	.get_link		= ethtool_op_get_link,
};

static void vnet_port_free_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	int i;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		void *buf = port->tx_bufs[i].buf;

		if (!buf)
			continue;

		ldc_unmap(port->vio.lp,
			  port->tx_bufs[i].cookies,
			  port->tx_bufs[i].ncookies);

		kfree(buf);
		port->tx_bufs[i].buf = NULL;
	}
}

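/* Allocate one 8-byte-aligned bounce buffer per TX ring entry and map
 * each one over LDC, then export the TX descriptor ring itself so the
 * peer can fetch descriptors from it.
 */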
static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	unsigned long len;
	int i, err, ncookies;
	void *dring;

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL);
		int map_len = (ETH_FRAME_LEN + 7) & ~7;

		err = -ENOMEM;
		if (!buf) {
			printk(KERN_ERR "TX buffer allocation failure\n");
			goto err_out;
		}
		err = -EFAULT;
		if ((unsigned long)buf & (8UL - 1)) {
			printk(KERN_ERR "TX buffer misaligned\n");
			kfree(buf);
			goto err_out;
		}

		err = ldc_map_single(port->vio.lp, buf, map_len,
				     port->tx_bufs[i].cookies, 2,
				     (LDC_MAP_SHADOW |
				      LDC_MAP_DIRECT |
				      LDC_MAP_RW));
		if (err < 0) {
			kfree(buf);
			goto err_out;
		}
		port->tx_bufs[i].buf = buf;
		port->tx_bufs[i].ncookies = err;
	}

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	len = (VNET_TX_RING_SIZE *
	       (sizeof(struct vio_net_desc) +
		(sizeof(struct ldc_trans_cookie) * 2)));

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring)) {
		err = PTR_ERR(dring);
		goto err_out;
	}

	dr->base = dring;
	dr->entry_size = (sizeof(struct vio_net_desc) +
			  (sizeof(struct ldc_trans_cookie) * 2));
	dr->num_entries = VNET_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VNET_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;

err_out:
	vnet_port_free_tx_bufs(port);

	return err;
}

static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);

static struct vnet * __devinit vnet_new(const u64 *local_mac)
{
	struct net_device *dev;
	struct vnet *vp;
	int err, i;

	dev = alloc_etherdev(sizeof(*vp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;

	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	vp = netdev_priv(dev);

	spin_lock_init(&vp->lock);
	vp->dev = dev;

	INIT_LIST_HEAD(&vp->port_list);
	for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&vp->port_hash[i]);
	INIT_LIST_HEAD(&vp->list);
	vp->local_mac = *local_mac;

	dev->open = vnet_open;
	dev->stop = vnet_close;
	dev->set_multicast_list = vnet_set_rx_mode;
	dev->set_mac_address = vnet_set_mac_addr;
	dev->tx_timeout = vnet_tx_timeout;
	dev->ethtool_ops = &vnet_ethtool_ops;
	dev->watchdog_timeo = VNET_TX_TIMEOUT;
	dev->change_mtu = vnet_change_mtu;
	dev->hard_start_xmit = vnet_start_xmit;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_free_dev;
	}

	printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');

	list_add(&vp->list, &vnet_list);

	return vp;

err_out_free_dev:
	free_netdev(dev);

	return ERR_PTR(err);
}

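/* Each vnet net_device is keyed by its local MAC address; every port
 * whose parent "network" node carries that MAC attaches to the same
 * struct vnet, created here on first use.
 */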
static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
{
	struct vnet *iter, *vp;

	mutex_lock(&vnet_list_mutex);
	vp = NULL;
	list_for_each_entry(iter, &vnet_list, list) {
		if (iter->local_mac == *local_mac) {
			vp = iter;
			break;
		}
	}
	if (!vp)
		vp = vnet_new(local_mac);
	mutex_unlock(&vnet_list_mutex);

	return vp;
}

static const char *local_mac_prop = "local-mac-address";

static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
						u64 port_node)
{
	const u64 *local_mac = NULL;
	u64 a;

	mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(hp, a);
		const char *name;

		name = mdesc_get_property(hp, target, "name", NULL);
		if (!name || strcmp(name, "network"))
			continue;

		local_mac = mdesc_get_property(hp, target,
					       local_mac_prop, NULL);
		if (local_mac)
			break;
	}
	if (!local_mac)
		return ERR_PTR(-ENODEV);

	return vnet_find_or_create(local_mac);
}

static struct ldc_channel_config vnet_ldc_cfg = {
	.event		= vnet_event,
	.mtu		= 64,
	.mode		= LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vnet_vio_ops = {
	.send_attr		= vnet_send_attr,
	.handle_attr		= vnet_handle_attr,
	.handshake_complete	= vnet_handshake_complete,
};

static void __devinit print_version(void)
{
	static int version_printed;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);
}

const char *remote_macaddr_prop = "remote-mac-address";

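/* Probe one "vnet-port" MD node: look up (or create) the parent vnet
 * device, read the remote MAC, set up the VIO/LDC state and TX ring,
 * link the port into the parent, and bring the channel up.
 */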
static int __devinit vnet_port_probe(struct vio_dev *vdev,
				     const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vnet_port *port;
	unsigned long flags;
	struct vnet *vp;
	const u64 *rmac;
	int len, i, err, switch_port;
	DECLARE_MAC_BUF(mac);

	print_version();

	hp = mdesc_grab();

	vp = vnet_find_parent(hp, vdev->mp);
	if (IS_ERR(vp)) {
		printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
		err = PTR_ERR(vp);
		goto err_out_put_mdesc;
	}

	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
	err = -ENODEV;
	if (!rmac) {
		printk(KERN_ERR PFX "Port lacks %s property.\n",
		       remote_macaddr_prop);
		goto err_out_put_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port) {
		printk(KERN_ERR PFX "Cannot allocate vnet_port.\n");
		goto err_out_put_mdesc;
	}

	for (i = 0; i < ETH_ALEN; i++)
		port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;

	port->vp = vp;

	err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
			      vnet_versions, ARRAY_SIZE(vnet_versions),
			      &vnet_vio_ops, vp->dev->name);
	if (err)
		goto err_out_free_port;

	err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vnet_port_alloc_tx_bufs(port);
	if (err)
		goto err_out_free_ldc;

	INIT_HLIST_NODE(&port->hash);
	INIT_LIST_HEAD(&port->list);

	switch_port = 0;
	if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
		switch_port = 1;
	port->switch_port = switch_port;

	spin_lock_irqsave(&vp->lock, flags);
	if (switch_port)
		list_add(&port->list, &vp->port_list);
	else
		list_add_tail(&port->list, &vp->port_list);
	hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
	spin_unlock_irqrestore(&vp->lock, flags);

	dev_set_drvdata(&vdev->dev, port);

	printk(KERN_INFO "%s: PORT ( remote-mac %s%s )\n",
	       vp->dev->name, print_mac(mac, port->raddr),
	       switch_port ? " switch-port" : "");

	vio_port_up(&port->vio);

	mdesc_release(hp);

	return 0;

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_put_mdesc:
	mdesc_release(hp);
	return err;
}

static int vnet_port_remove(struct vio_dev *vdev)
{
	struct vnet_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		struct vnet *vp = port->vp;
		unsigned long flags;

		del_timer_sync(&port->vio.timer);

		spin_lock_irqsave(&vp->lock, flags);
		list_del(&port->list);
		hlist_del(&port->hash);
		spin_unlock_irqrestore(&vp->lock, flags);

		vnet_port_free_tx_bufs(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}

static struct vio_device_id vnet_port_match[] = {
	{
		.type = "vnet-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vnet_port_match);

static struct vio_driver vnet_port_driver = {
	.id_table	= vnet_port_match,
	.probe		= vnet_port_probe,
	.remove		= vnet_port_remove,
	.driver		= {
		.name	= "vnet_port",
		.owner	= THIS_MODULE,
	}
};

static int __init vnet_init(void)
{
	return vio_register_driver(&vnet_port_driver);
}

static void __exit vnet_exit(void)
{
	vio_unregister_driver(&vnet_port_driver);
}

module_init(vnet_init);
module_exit(vnet_exit);