/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
        struct page *page;
        unsigned offset;
};

#define NETFRONT_SKB_CB(skb)    ((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF       0

#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
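
/*
 * Note: __RING_SIZE() rounds the number of entries that fit in one page
 * down to a power of two; with 4 KiB pages this works out to 256 entries
 * for both rings (illustrative figure -- the exact count depends on the
 * request/response sizes in netif.h).  The power-of-two size is what lets
 * xennet_rxidx() below mask with (NET_RX_RING_SIZE - 1) instead of taking
 * a modulo.
 */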

struct netfront_info {
        struct list_head list;
        struct net_device *netdev;

        struct napi_struct napi;

        unsigned int evtchn;
        struct xenbus_device *xbdev;

        spinlock_t   tx_lock;
        struct xen_netif_tx_front_ring tx;
        int tx_ring_ref;

        /*
         * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
         * are linked from tx_skb_freelist through skb_entry.link.
         *
         * NB. Freelist index entries are always going to be less than
         * PAGE_OFFSET, whereas pointers to skbs will always be equal or
         * greater than PAGE_OFFSET: we use this property to distinguish
         * them.
         */
        union skb_entry {
                struct sk_buff *skb;
                unsigned long link;
        } tx_skbs[NET_TX_RING_SIZE];
        grant_ref_t gref_tx_head;
        grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
        unsigned tx_skb_freelist;

        spinlock_t   rx_lock ____cacheline_aligned_in_smp;
        struct xen_netif_rx_front_ring rx;
        int rx_ring_ref;

        /* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
        unsigned rx_min_target, rx_max_target, rx_target;
        struct sk_buff_head rx_batch;

        struct timer_list rx_refill_timer;

        struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
        grant_ref_t gref_rx_head;
        grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

        unsigned long rx_pfn_array[NET_RX_RING_SIZE];
        struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
        struct mmu_update rx_mmu[NET_RX_RING_SIZE];
};

struct netfront_rx_info {
        struct xen_netif_rx_response rx;
        struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
        list->link = id;
}

static int skb_entry_is_link(const union skb_entry *list)
{
        BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
        return ((unsigned long)list->skb < PAGE_OFFSET);
}
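
/*
 * Note: skb_entry is a tagged union with an implicit tag.  Freelist links
 * are small indices (at most NET_TX_RING_SIZE), while any valid skb
 * pointer is a kernel virtual address at or above PAGE_OFFSET (e.g.
 * 0xc0000000 on 32-bit x86), so the comparison above distinguishes the
 * two unambiguously.  The BUILD_BUG_ON guards the assumption that both
 * members have the same size, so storing one and testing the other is
 * well defined here.
 */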

/*
 * Helpers for acquiring and freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
                               unsigned short id)
{
        skb_entry_set_link(&list[id], *head);
        *head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
                                           union skb_entry *list)
{
        unsigned int id = *head;
        *head = list[id].link;
        return id;
}
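
/*
 * Illustration (indices only): with a freelist of 0 -> 1 -> 2,
 * get_id_from_freelist() pops and returns 0, leaving 1 -> 2; a later
 * add_id_to_freelist(..., 0) pushes 0 back onto the front.  Both
 * operations are O(1), in LIFO order.
 */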

static int xennet_rxidx(RING_IDX idx)
{
        return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
                                         RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        struct sk_buff *skb = np->rx_skbs[i];
        np->rx_skbs[i] = NULL;
        return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
                                     RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        grant_ref_t ref = np->grant_rx_ref[i];
        np->grant_rx_ref[i] = GRANT_INVALID_REF;
        return ref;
}

#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif

static int xennet_can_sg(struct net_device *dev)
{
        return dev->features & NETIF_F_SG;
}

static void rx_refill_timeout(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netfront_info *np = netdev_priv(dev);
        napi_schedule(&np->napi);
}

static int netfront_tx_slot_available(struct netfront_info *np)
{
        return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
                (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
}
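
/*
 * Note: the MAX_SKB_FRAGS + 2 headroom matches the worst case for one
 * skb: up to MAX_SKB_FRAGS + 1 slots of data (the linear header may
 * straddle a page boundary, see xennet_start_xmit()) plus one slot for
 * a GSO extra-info descriptor.
 */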

static void xennet_maybe_wake_tx(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);

        if (unlikely(netif_queue_stopped(dev)) &&
            netfront_tx_slot_available(np) &&
            likely(netif_running(dev)))
                netif_wake_queue(dev);
}

static void xennet_alloc_rx_buffers(struct net_device *dev)
{
        unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
        struct sk_buff *skb;
        struct page *page;
        int i, batch_target, notify;
        RING_IDX req_prod = np->rx.req_prod_pvt;
        grant_ref_t ref;
        unsigned long pfn;
        void *vaddr;
        struct xen_netif_rx_request *req;

        if (unlikely(!netif_carrier_ok(dev)))
                return;

        /*
         * Allocate skbuffs greedily, even though we batch updates to the
         * receive ring. This creates a less bursty demand on the memory
         * allocator, so should reduce the chance of failed allocation requests
         * both for ourself and for other kernel subsystems.
         */
        batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
        for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
                skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
                                         GFP_ATOMIC | __GFP_NOWARN);
                if (unlikely(!skb))
                        goto no_skb;

                /* Align the IP header to a 16-byte boundary. */
                skb_reserve(skb, NET_IP_ALIGN);

                page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
                if (!page) {
                        kfree_skb(skb);
no_skb:
                        /* Any skbuffs queued for refill? Force them out. */
                        if (i != 0)
                                goto refill;
                        /* Could not allocate any skbuffs. Try again later. */
                        mod_timer(&np->rx_refill_timer,
                                  jiffies + (HZ/10));
                        break;
                }

                skb_shinfo(skb)->frags[0].page = page;
                skb_shinfo(skb)->nr_frags = 1;
                __skb_queue_tail(&np->rx_batch, skb);
        }

        /* Is the batch large enough to be worthwhile? */
        if (i < (np->rx_target/2)) {
                if (req_prod > np->rx.sring->req_prod)
                        goto push;
                return;
        }

        /* Adjust our fill target if we risked running out of buffers. */
        if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
            ((np->rx_target *= 2) > np->rx_max_target))
                np->rx_target = np->rx_max_target;

 refill:
        for (i = 0; ; i++) {
                skb = __skb_dequeue(&np->rx_batch);
                if (skb == NULL)
                        break;

                skb->dev = dev;

                id = xennet_rxidx(req_prod + i);

                BUG_ON(np->rx_skbs[id]);
                np->rx_skbs[id] = skb;

                ref = gnttab_claim_grant_reference(&np->gref_rx_head);
                BUG_ON((signed short)ref < 0);
                np->grant_rx_ref[id] = ref;

                pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
                vaddr = page_address(skb_shinfo(skb)->frags[0].page);

                req = RING_GET_REQUEST(&np->rx, req_prod + i);
                gnttab_grant_foreign_access_ref(ref,
                                                np->xbdev->otherend_id,
                                                pfn_to_mfn(pfn),
                                                0);

                req->id = id;
                req->gref = ref;
        }

        /* Barrier: ensure the backend sees the requests before the new index. */
        wmb();
        np->rx.req_prod_pvt = req_prod + i;
 push:
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
        if (notify)
                notify_remote_via_irq(np->netdev->irq);
}
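
/*
 * Note: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() publishes req_prod and
 * sets 'notify' only when the backend has signalled, via the ring's
 * event counter, that it is waiting for more requests; the relatively
 * expensive event-channel kick is skipped while the backend is still
 * actively polling the ring.
 */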

static int xennet_open(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);

        napi_enable(&np->napi);

        spin_lock_bh(&np->rx_lock);
        if (netif_carrier_ok(dev)) {
                xennet_alloc_rx_buffers(dev);
                np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
                if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
                        napi_schedule(&np->napi);
        }
        spin_unlock_bh(&np->rx_lock);

        netif_start_queue(dev);

        return 0;
}

static void xennet_tx_buf_gc(struct net_device *dev)
{
        RING_IDX cons, prod;
        unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
        struct sk_buff *skb;

        BUG_ON(!netif_carrier_ok(dev));

        do {
                prod = np->tx.sring->rsp_prod;
                rmb(); /* Ensure we see responses up to 'prod'. */

                for (cons = np->tx.rsp_cons; cons != prod; cons++) {
                        struct xen_netif_tx_response *txrsp;

                        txrsp = RING_GET_RESPONSE(&np->tx, cons);
                        if (txrsp->status == NETIF_RSP_NULL)
                                continue;

                        id  = txrsp->id;
                        skb = np->tx_skbs[id].skb;
                        if (unlikely(gnttab_query_foreign_access(
                                np->grant_tx_ref[id]) != 0)) {
                                printk(KERN_ALERT "xennet_tx_buf_gc: warning "
                                       "-- grant still in use by backend "
                                       "domain.\n");
                                BUG();
                        }
                        gnttab_end_foreign_access_ref(
                                np->grant_tx_ref[id], GNTMAP_readonly);
                        gnttab_release_grant_reference(
                                &np->gref_tx_head, np->grant_tx_ref[id]);
                        np->grant_tx_ref[id] = GRANT_INVALID_REF;
                        add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
                        dev_kfree_skb_irq(skb);
                }

                np->tx.rsp_cons = prod;

                /*
                 * Set a new event, then check for race with update of tx_cons.
                 * Note that it is essential to schedule a callback, no matter
                 * how few buffers are pending. Even if there is space in the
                 * transmit ring, higher layers may be blocked because too much
                 * data is outstanding: in such cases notification from Xen is
                 * likely to be the only kick that we'll get.
                 */
                np->tx.sring->rsp_event =
                        prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
                mb();           /* update shared area */
        } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

        xennet_maybe_wake_tx(dev);
}
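
/*
 * Note: the do/while above closes a lost-wakeup race.  After publishing
 * a new rsp_event the loop re-reads rsp_prod; responses that slipped in
 * meanwhile are reaped immediately instead of waiting on an interrupt
 * that may never be raised.
 */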

static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
                              struct xen_netif_tx_request *tx)
{
        struct netfront_info *np = netdev_priv(dev);
        char *data = skb->data;
        unsigned long mfn;
        RING_IDX prod = np->tx.req_prod_pvt;
        int frags = skb_shinfo(skb)->nr_frags;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);
        unsigned int id;
        grant_ref_t ref;
        int i;

        /* While the header overlaps a page boundary (including being
           larger than a page), split it into page-sized chunks. */
        while (len > PAGE_SIZE - offset) {
                tx->size = PAGE_SIZE - offset;
                tx->flags |= NETTXF_more_data;
                len -= tx->size;
                data += tx->size;
                offset = 0;

                id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
                np->tx_skbs[id].skb = skb_get(skb);
                tx = RING_GET_REQUEST(&np->tx, prod++);
                tx->id = id;
                ref = gnttab_claim_grant_reference(&np->gref_tx_head);
                BUG_ON((signed short)ref < 0);

                mfn = virt_to_mfn(data);
                gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
                                                mfn, GNTMAP_readonly);

                tx->gref = np->grant_tx_ref[id] = ref;
                tx->offset = offset;
                tx->size = len;
                tx->flags = 0;
        }

        /* Grant backend access to each skb fragment page. */
        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;

                tx->flags |= NETTXF_more_data;

                id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
                np->tx_skbs[id].skb = skb_get(skb);
                tx = RING_GET_REQUEST(&np->tx, prod++);
                tx->id = id;
                ref = gnttab_claim_grant_reference(&np->gref_tx_head);
                BUG_ON((signed short)ref < 0);

                mfn = pfn_to_mfn(page_to_pfn(frag->page));
                gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
                                                mfn, GNTMAP_readonly);

                tx->gref = np->grant_tx_ref[id] = ref;
                tx->offset = frag->page_offset;
                tx->size = frag->size;
                tx->flags = 0;
        }

        np->tx.req_prod_pvt = prod;
}
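
/*
 * Worked example (4 KiB pages): a 5000-byte linear header starting at
 * in-page offset 3000 is emitted as two requests of 1096 and 3904
 * bytes; each paged fragment then consumes one further request.  Every
 * additional request takes a reference on the skb (skb_get()) so it
 * stays alive until xennet_tx_buf_gc() reaps the matching response.
 */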

static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
        struct xen_netif_tx_request *tx;
        struct xen_netif_extra_info *extra;
        char *data = skb->data;
        RING_IDX i;
        grant_ref_t ref;
        unsigned long mfn;
        int notify;
        int frags = skb_shinfo(skb)->nr_frags;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);

        frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
        if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
                printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
                       frags);
                dump_stack();
                goto drop;
        }

        spin_lock_irq(&np->tx_lock);

        if (unlikely(!netif_carrier_ok(dev) ||
                     (frags > 1 && !xennet_can_sg(dev)) ||
                     netif_needs_gso(dev, skb))) {
                spin_unlock_irq(&np->tx_lock);
                goto drop;
        }

        i = np->tx.req_prod_pvt;

        id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
        np->tx_skbs[id].skb = skb;

        tx = RING_GET_REQUEST(&np->tx, i);

        tx->id   = id;
        ref = gnttab_claim_grant_reference(&np->gref_tx_head);
        BUG_ON((signed short)ref < 0);
        mfn = virt_to_mfn(data);
        gnttab_grant_foreign_access_ref(
                ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
        tx->gref = np->grant_tx_ref[id] = ref;
        tx->offset = offset;
        tx->size = len;
        extra = NULL;

        tx->flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                /* local packet? */
                tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
        else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                /* remote but checksummed. */
                tx->flags |= NETTXF_data_validated;

        if (skb_shinfo(skb)->gso_size) {
                struct xen_netif_extra_info *gso;

                gso = (struct xen_netif_extra_info *)
                        RING_GET_REQUEST(&np->tx, ++i);

                if (extra)
                        extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
                else
                        tx->flags |= NETTXF_extra_info;

                gso->u.gso.size = skb_shinfo(skb)->gso_size;
                gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
                gso->u.gso.pad = 0;
                gso->u.gso.features = 0;

                gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
                gso->flags = 0;
                extra = gso;
        }
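
        /*
         * Note: the GSO descriptor above occupies a full ring slot of its
         * own (hence the ++i) even though it carries no grant reference;
         * this is the extra slot accounted for in the worst-case math in
         * netfront_tx_slot_available().
         */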

        np->tx.req_prod_pvt = i + 1;

        xennet_make_frags(skb, dev, tx);
        tx->size = skb->len;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
        if (notify)
                notify_remote_via_irq(np->netdev->irq);

        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;

        /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
        xennet_tx_buf_gc(dev);

        if (!netfront_tx_slot_available(np))
                netif_stop_queue(dev);

        spin_unlock_irq(&np->tx_lock);

        return NETDEV_TX_OK;

 drop:
        dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static int xennet_close(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        netif_stop_queue(np->netdev);
        napi_disable(&np->napi);
        return 0;
}

static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
                                grant_ref_t ref)
{
        int new = xennet_rxidx(np->rx.req_prod_pvt);

        BUG_ON(np->rx_skbs[new]);
        np->rx_skbs[new] = skb;
        np->grant_rx_ref[new] = ref;
        RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
        RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
        np->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_info *np,
                             struct xen_netif_extra_info *extras,
                             RING_IDX rp)
{
        struct xen_netif_extra_info *extra;
        struct device *dev = &np->netdev->dev;
        RING_IDX cons = np->rx.rsp_cons;
        int err = 0;

        do {
                struct sk_buff *skb;
                grant_ref_t ref;

                if (unlikely(cons + 1 == rp)) {
                        if (net_ratelimit())
                                dev_warn(dev, "Missing extra info\n");
                        err = -EBADR;
                        break;
                }

                extra = (struct xen_netif_extra_info *)
                        RING_GET_RESPONSE(&np->rx, ++cons);

                if (unlikely(!extra->type ||
                             extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        if (net_ratelimit())
                                dev_warn(dev, "Invalid extra type: %d\n",
                                         extra->type);
                        err = -EINVAL;
                } else {
                        memcpy(&extras[extra->type - 1], extra,
                               sizeof(*extra));
                }

                skb = xennet_get_rx_skb(np, cons);
                ref = xennet_get_rx_ref(np, cons);
                xennet_move_rx_slot(np, skb, ref);
        } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

        np->rx.rsp_cons = cons;
        return err;
}

static int xennet_get_responses(struct netfront_info *np,
                                struct netfront_rx_info *rinfo, RING_IDX rp,
                                struct sk_buff_head *list)
{
        struct xen_netif_rx_response *rx = &rinfo->rx;
        struct xen_netif_extra_info *extras = rinfo->extras;
        struct device *dev = &np->netdev->dev;
        RING_IDX cons = np->rx.rsp_cons;
        struct sk_buff *skb = xennet_get_rx_skb(np, cons);
        grant_ref_t ref = xennet_get_rx_ref(np, cons);
        int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
        int frags = 1;
        int err = 0;
        unsigned long ret;

        if (rx->flags & NETRXF_extra_info) {
                err = xennet_get_extras(np, extras, rp);
                cons = np->rx.rsp_cons;
        }

        for (;;) {
                if (unlikely(rx->status < 0 ||
                             rx->offset + rx->status > PAGE_SIZE)) {
                        if (net_ratelimit())
                                dev_warn(dev, "rx->offset: %x, size: %u\n",
                                         rx->offset, rx->status);
                        xennet_move_rx_slot(np, skb, ref);
                        err = -EINVAL;
                        goto next;
                }

                /*
                 * This definitely indicates a bug, either in this driver or in
                 * the backend driver. In future this should flag the bad
                 * situation to the system controller to reboot the backend.
                 */
                if (ref == GRANT_INVALID_REF) {
                        if (net_ratelimit())
                                dev_warn(dev, "Bad rx response id %d.\n",
                                         rx->id);
                        err = -EINVAL;
                        goto next;
                }

                ret = gnttab_end_foreign_access_ref(ref, 0);
                BUG_ON(!ret);

                gnttab_release_grant_reference(&np->gref_rx_head, ref);

                __skb_queue_tail(list, skb);

next:
                if (!(rx->flags & NETRXF_more_data))
                        break;

                if (cons + frags == rp) {
                        if (net_ratelimit())
                                dev_warn(dev, "Need more frags\n");
                        err = -ENOENT;
                        break;
                }

                rx = RING_GET_RESPONSE(&np->rx, cons + frags);
                skb = xennet_get_rx_skb(np, cons + frags);
                ref = xennet_get_rx_ref(np, cons + frags);
                frags++;
        }

        if (unlikely(frags > max)) {
                if (net_ratelimit())
                        dev_warn(dev, "Too many frags\n");
                err = -E2BIG;
        }

        if (unlikely(err))
                np->rx.rsp_cons = cons + frags;

        return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
                              struct xen_netif_extra_info *gso)
{
        if (!gso->u.gso.size) {
                if (net_ratelimit())
                        printk(KERN_WARNING "GSO size must not be zero.\n");
                return -EINVAL;
        }

        /* Currently only TCPv4 segmentation offload is supported. */
        if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
                if (net_ratelimit())
                        printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
                return -EINVAL;
        }

        skb_shinfo(skb)->gso_size = gso->u.gso.size;
        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

        /* Header must be checked, and gso_segs computed. */
        skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
        skb_shinfo(skb)->gso_segs = 0;

        return 0;
}
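
/*
 * Note: SKB_GSO_DODGY marks the segmentation metadata as coming from an
 * untrusted source (here, the backend domain), so the stack revalidates
 * the headers; gso_segs is left at zero to be recomputed at that point.
 */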

static RING_IDX xennet_fill_frags(struct netfront_info *np,
                                  struct sk_buff *skb,
                                  struct sk_buff_head *list)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
        RING_IDX cons = np->rx.rsp_cons;
        skb_frag_t *frag = shinfo->frags + nr_frags;
        struct sk_buff *nskb;

        while ((nskb = __skb_dequeue(list))) {
                struct xen_netif_rx_response *rx =
                        RING_GET_RESPONSE(&np->rx, ++cons);

                frag->page = skb_shinfo(nskb)->frags[0].page;
                frag->page_offset = rx->offset;
                frag->size = rx->status;

                skb->data_len += rx->status;

                skb_shinfo(nskb)->nr_frags = 0;
                kfree_skb(nskb);

                frag++;
                nr_frags++;
        }

        shinfo->nr_frags = nr_frags;
        return cons;
}

static int skb_checksum_setup(struct sk_buff *skb)
{
        struct iphdr *iph;
        unsigned char *th;
        int err = -EPROTO;

        if (skb->protocol != htons(ETH_P_IP))
                goto out;

        iph = (void *)skb->data;
        th = skb->data + 4 * iph->ihl;
        if (th >= skb_tail_pointer(skb))
                goto out;

        skb->csum_start = th - skb->head;
        switch (iph->protocol) {
        case IPPROTO_TCP:
                skb->csum_offset = offsetof(struct tcphdr, check);
                break;
        case IPPROTO_UDP:
                skb->csum_offset = offsetof(struct udphdr, check);
                break;
        default:
                if (net_ratelimit())
                        printk(KERN_ERR "Attempting to checksum a non-"
                               "TCP/UDP packet, dropping a protocol"
                               " %d packet\n", iph->protocol);
                goto out;
        }

        if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
                goto out;

        err = 0;

out:
        return err;
}
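
/*
 * Note: packets flagged with a blank checksum arrive with only a
 * partial checksum, so skb_checksum_setup() locates the L4 header and
 * fills in csum_start/csum_offset for later completion; the "+ 2"
 * bounds check ensures the 16-bit checksum field itself lies within
 * the buffer.
 */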

static int handle_incoming_queue(struct net_device *dev,
                                 struct sk_buff_head *rxq)
{
        int packets_dropped = 0;
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(rxq)) != NULL) {
                struct page *page = NETFRONT_SKB_CB(skb)->page;
                void *vaddr = page_address(page);
                unsigned offset = NETFRONT_SKB_CB(skb)->offset;

                memcpy(skb->data, vaddr + offset,
                       skb_headlen(skb));

                if (page != skb_shinfo(skb)->frags[0].page)
                        __free_page(page);

                /* Ethernet work: Delayed to here as it peeks the header. */
                skb->protocol = eth_type_trans(skb, dev);

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        if (skb_checksum_setup(skb)) {
                                kfree_skb(skb);
                                packets_dropped++;
                                dev->stats.rx_errors++;
                                continue;
                        }
                }

                dev->stats.rx_packets++;
                dev->stats.rx_bytes += skb->len;

                /* Pass it up. */
                netif_receive_skb(skb);
        }

        return packets_dropped;
}

static int xennet_poll(struct napi_struct *napi, int budget)
{
        struct netfront_info *np = container_of(napi, struct netfront_info, napi);
        struct net_device *dev = np->netdev;
        struct sk_buff *skb;
        struct netfront_rx_info rinfo;
        struct xen_netif_rx_response *rx = &rinfo.rx;
        struct xen_netif_extra_info *extras = rinfo.extras;
        RING_IDX i, rp;
        int work_done;
        struct sk_buff_head rxq;
        struct sk_buff_head errq;
        struct sk_buff_head tmpq;
        unsigned long flags;
        unsigned int len;
        int err;

        spin_lock(&np->rx_lock);

        skb_queue_head_init(&rxq);
        skb_queue_head_init(&errq);
        skb_queue_head_init(&tmpq);

        rp = np->rx.sring->rsp_prod;
        rmb(); /* Ensure we see queued responses up to 'rp'. */

        i = np->rx.rsp_cons;
        work_done = 0;
        while ((i != rp) && (work_done < budget)) {
                memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
                memset(extras, 0, sizeof(rinfo.extras));

                err = xennet_get_responses(np, &rinfo, rp, &tmpq);

                if (unlikely(err)) {
err:
                        while ((skb = __skb_dequeue(&tmpq)))
                                __skb_queue_tail(&errq, skb);
                        dev->stats.rx_errors++;
                        i = np->rx.rsp_cons;
                        continue;
                }

                skb = __skb_dequeue(&tmpq);

                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

                        if (unlikely(xennet_set_skb_gso(skb, gso))) {
                                __skb_queue_head(&tmpq, skb);
                                np->rx.rsp_cons += skb_queue_len(&tmpq);
                                goto err;
                        }
                }

                NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
                NETFRONT_SKB_CB(skb)->offset = rx->offset;

                len = rx->status;
                if (len > RX_COPY_THRESHOLD)
                        len = RX_COPY_THRESHOLD;
                skb_put(skb, len);

                if (rx->status > len) {
                        skb_shinfo(skb)->frags[0].page_offset =
                                rx->offset + len;
                        skb_shinfo(skb)->frags[0].size = rx->status - len;
                        skb->data_len = rx->status - len;
                } else {
                        skb_shinfo(skb)->frags[0].page = NULL;
                        skb_shinfo(skb)->nr_frags = 0;
                }

                i = xennet_fill_frags(np, skb, &tmpq);

                /*
                 * Truesize approximates the size of true data plus
                 * any supervisor overheads. Adding hypervisor
                 * overheads has been shown to significantly reduce
                 * achievable bandwidth with the default receive
                 * buffer size. It is therefore not wise to account
                 * for it here.
                 *
                 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
                 * to RX_COPY_THRESHOLD + the supervisor
                 * overheads. Here, we add the size of the data pulled
                 * in xennet_fill_frags().
                 *
                 * We also adjust for any unused space in the main
                 * data area by subtracting (RX_COPY_THRESHOLD -
                 * len). This is especially important with drivers
                 * which split incoming packets into header and data,
                 * using only 66 bytes of the main data area (see the
                 * e1000 driver, for example).  On such systems,
                 * without this last adjustment, our achievable
                 * receive throughput using the standard receive
                 * buffer size was cut by 25%(!!!).
                 */
                skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
                skb->len += skb->data_len;

                if (rx->flags & NETRXF_csum_blank)
                        skb->ip_summed = CHECKSUM_PARTIAL;
                else if (rx->flags & NETRXF_data_validated)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                __skb_queue_tail(&rxq, skb);

                np->rx.rsp_cons = ++i;
                work_done++;
        }

        __skb_queue_purge(&errq);

        work_done -= handle_incoming_queue(dev, &rxq);

        /* If we get a callback with very few responses, reduce fill target. */
        /* NB. Note exponential increase, linear decrease. */
        if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
             ((3*np->rx_target) / 4)) &&
            (--np->rx_target < np->rx_min_target))
                np->rx_target = np->rx_min_target;

        xennet_alloc_rx_buffers(dev);

        if (work_done < budget) {
                int more_to_do = 0;

                local_irq_save(flags);

                RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
                if (!more_to_do)
                        __napi_complete(napi);

                local_irq_restore(flags);
        }

        spin_unlock(&np->rx_lock);

        return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
        int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}
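
/*
 * Note: 65535 is the maximum IP datagram size; with scatter-gather the
 * device can accept an MTU of up to that minus the Ethernet header,
 * otherwise the MTU stays capped at the standard ETH_DATA_LEN (1500).
 */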

static void xennet_release_tx_bufs(struct netfront_info *np)
{
        struct sk_buff *skb;
        int i;

        for (i = 0; i < NET_TX_RING_SIZE; i++) {
                /* Skip over entries which are actually freelist references */
                if (skb_entry_is_link(&np->tx_skbs[i]))
                        continue;

                skb = np->tx_skbs[i].skb;
                gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
                                              GNTMAP_readonly);
                gnttab_release_grant_reference(&np->gref_tx_head,
                                               np->grant_tx_ref[i]);
                np->grant_tx_ref[i] = GRANT_INVALID_REF;
                add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
                dev_kfree_skb_irq(skb);
        }
}

static void xennet_release_rx_bufs(struct netfront_info *np)
{
        struct mmu_update      *mmu = np->rx_mmu;
        struct multicall_entry *mcl = np->rx_mcl;
        struct sk_buff_head free_list;
        struct sk_buff *skb;
        unsigned long mfn;
        int xfer = 0, noxfer = 0, unused = 0;
        int id, ref;

        dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
                 __func__);
        return;

        skb_queue_head_init(&free_list);

        spin_lock_bh(&np->rx_lock);

        for (id = 0; id < NET_RX_RING_SIZE; id++) {
                ref = np->grant_rx_ref[id];
                if (ref == GRANT_INVALID_REF) {
                        unused++;
                        continue;
                }

                skb = np->rx_skbs[id];
                mfn = gnttab_end_foreign_transfer_ref(ref);
                gnttab_release_grant_reference(&np->gref_rx_head, ref);
                np->grant_rx_ref[id] = GRANT_INVALID_REF;

                if (0 == mfn) {
                        skb_shinfo(skb)->nr_frags = 0;
                        dev_kfree_skb(skb);
                        noxfer++;
                        continue;
                }

                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        /* Remap the page. */
                        struct page *page = skb_shinfo(skb)->frags[0].page;
                        unsigned long pfn = page_to_pfn(page);
                        void *vaddr = page_address(page);

                        MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
                                                mfn_pte(mfn, PAGE_KERNEL),
                                                0);
                        mcl++;
                        mmu->ptr = ((u64)mfn << PAGE_SHIFT)
                                | MMU_MACHPHYS_UPDATE;
                        mmu->val = pfn;
                        mmu++;

                        set_phys_to_machine(pfn, mfn);
                }
                __skb_queue_tail(&free_list, skb);
                xfer++;
        }

        dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
                 __func__, xfer, noxfer, unused);

        if (xfer) {
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        /* Do all the remapping work and M2P updates. */
                        MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
                                         NULL, DOMID_SELF);
                        mcl++;
                        HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
                }
        }

        __skb_queue_purge(&free_list);

        spin_unlock_bh(&np->rx_lock);
}

static void xennet_uninit(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        xennet_release_tx_bufs(np);
        xennet_release_rx_bufs(np);
        gnttab_free_grant_references(np->gref_tx_head);
        gnttab_free_grant_references(np->gref_rx_head);
}

static const struct net_device_ops xennet_netdev_ops = {
        .ndo_open            = xennet_open,
        .ndo_uninit          = xennet_uninit,
        .ndo_stop            = xennet_close,
        .ndo_start_xmit      = xennet_start_xmit,
        .ndo_change_mtu      = xennet_change_mtu,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr   = eth_validate_addr,
};

static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
{
        int i, err;
        struct net_device *netdev;
        struct netfront_info *np;

        netdev = alloc_etherdev(sizeof(struct netfront_info));
        if (!netdev) {
                printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
                       __func__);
                return ERR_PTR(-ENOMEM);
        }

        np                   = netdev_priv(netdev);
        np->xbdev            = dev;

        spin_lock_init(&np->tx_lock);
        spin_lock_init(&np->rx_lock);

        skb_queue_head_init(&np->rx_batch);
        np->rx_target     = RX_DFL_MIN_TARGET;
        np->rx_min_target = RX_DFL_MIN_TARGET;
        np->rx_max_target = RX_MAX_TARGET;

        init_timer(&np->rx_refill_timer);
        np->rx_refill_timer.data = (unsigned long)netdev;
        np->rx_refill_timer.function = rx_refill_timeout;

        /* Initialise tx_skbs as a free chain containing every entry. */
        np->tx_skb_freelist = 0;
        for (i = 0; i < NET_TX_RING_SIZE; i++) {
                skb_entry_set_link(&np->tx_skbs[i], i+1);
                np->grant_tx_ref[i] = GRANT_INVALID_REF;
        }

        /* Clear out rx_skbs */
        for (i = 0; i < NET_RX_RING_SIZE; i++) {
                np->rx_skbs[i] = NULL;
                np->grant_rx_ref[i] = GRANT_INVALID_REF;
        }

        /* A grant for every tx ring slot */
        if (gnttab_alloc_grant_references(TX_MAX_TARGET,
                                          &np->gref_tx_head) < 0) {
                printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
                err = -ENOMEM;
                goto exit;
        }
        /* A grant for every rx ring slot */
        if (gnttab_alloc_grant_references(RX_MAX_TARGET,
                                          &np->gref_rx_head) < 0) {
                printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
                err = -ENOMEM;
                goto exit_free_tx;
        }

        netdev->netdev_ops      = &xennet_netdev_ops;

        netif_napi_add(netdev, &np->napi, xennet_poll, 64);
        netdev->features        = NETIF_F_IP_CSUM;

        SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
        SET_NETDEV_DEV(netdev, &dev->dev);

        np->netdev = netdev;

        netif_carrier_off(netdev);

        return netdev;

 exit_free_tx:
        gnttab_free_grant_references(np->gref_tx_head);
 exit:
        free_netdev(netdev);
        return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int __devinit netfront_probe(struct xenbus_device *dev,
                                    const struct xenbus_device_id *id)
{
        int err;
        struct net_device *netdev;
        struct netfront_info *info;

        netdev = xennet_create_dev(dev);
        if (IS_ERR(netdev)) {
                err = PTR_ERR(netdev);
                xenbus_dev_fatal(dev, err, "creating netdev");
                return err;
        }

        info = netdev_priv(netdev);
        dev_set_drvdata(&dev->dev, info);

        err = register_netdev(info->netdev);
        if (err) {
                printk(KERN_WARNING "%s: register_netdev err=%d\n",
                       __func__, err);
                goto fail;
        }

        err = xennet_sysfs_addif(info->netdev);
        if (err) {
                unregister_netdev(info->netdev);
                printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
                       __func__, err);
                goto fail;
        }

        return 0;

 fail:
        free_netdev(netdev);
        dev_set_drvdata(&dev->dev, NULL);
        return err;
}

|  | 1242 | static void xennet_end_access(int ref, void *page) | 
|  | 1243 | { | 
|  | 1244 | /* This frees the page as a side-effect */ | 
|  | 1245 | if (ref != GRANT_INVALID_REF) | 
|  | 1246 | gnttab_end_foreign_access(ref, 0, (unsigned long)page); | 
|  | 1247 | } | 
|  | 1248 |  | 
static void xennet_disconnect_backend(struct netfront_info *info)
{
        /* Stop the old interface to prevent errors whilst we rebuild state. */
        spin_lock_bh(&info->rx_lock);
        spin_lock_irq(&info->tx_lock);
        netif_carrier_off(info->netdev);
        spin_unlock_irq(&info->tx_lock);
        spin_unlock_bh(&info->rx_lock);

        if (info->netdev->irq)
                unbind_from_irqhandler(info->netdev->irq, info->netdev);
        info->evtchn = info->netdev->irq = 0;

        /* End access and free the pages */
        xennet_end_access(info->tx_ring_ref, info->tx.sring);
        xennet_end_access(info->rx_ring_ref, info->rx.sring);

        info->tx_ring_ref = GRANT_INVALID_REF;
        info->rx_ring_ref = GRANT_INVALID_REF;
        info->tx.sring = NULL;
        info->rx.sring = NULL;
}

/*
 * We are reconnecting to the backend, due to a suspend/resume or a
 * backend driver restart.  We tear down our netif structure and
 * recreate it, but leave the device-layer structures intact so that
 * this is transparent to the rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
        struct netfront_info *info = dev_get_drvdata(&dev->dev);

        dev_dbg(&dev->dev, "%s\n", dev->nodename);

        xennet_disconnect_backend(info);
        return 0;
}

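/*
 * The toolstack publishes the MAC address as a colon-separated string
 * in the frontend's xenstore directory, e.g. (illustrative value):
 *
 *   .../device/vif/0/mac = "00:16:3e:5a:12:34"
 *
 * Parse it into the six bytes of dev_addr, rejecting anything that is
 * not exactly ETH_ALEN hex octets.
 */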
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
        char *s, *e, *macstr;
        int i;

        macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
        if (IS_ERR(macstr))
                return PTR_ERR(macstr);

        for (i = 0; i < ETH_ALEN; i++) {
                mac[i] = simple_strtoul(s, &e, 16);
                if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
                        kfree(macstr);
                        return -ENOENT;
                }
                s = e+1;
        }

        kfree(macstr);
        return 0;
}

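/*
 * A single event channel signals both directions: TX completions are
 * reaped directly here under tx_lock, while RX work is deferred to the
 * NAPI poll handler whenever the shared ring has unconsumed responses.
 */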
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct netfront_info *np = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&np->tx_lock, flags);

        if (likely(netif_carrier_ok(dev))) {
                xennet_tx_buf_gc(dev);
                /* Under tx_lock: protects access to rx shared-ring indexes. */
                if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
                        napi_schedule(&np->napi);
        }

        spin_unlock_irqrestore(&np->tx_lock, flags);

        return IRQ_HANDLED;
}

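/*
 * Allocate and share the communication channel with the backend: one
 * page each for the TX and RX shared rings (granted to the other end),
 * plus an event channel bound to xennet_interrupt.  On failure the
 * caller falls through to xennet_disconnect_backend() to release
 * whatever was set up.
 */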
static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
        struct xen_netif_tx_sring *txs;
        struct xen_netif_rx_sring *rxs;
        int err;
        struct net_device *netdev = info->netdev;

        info->tx_ring_ref = GRANT_INVALID_REF;
        info->rx_ring_ref = GRANT_INVALID_REF;
        info->rx.sring = NULL;
        info->tx.sring = NULL;
        netdev->irq = 0;

        err = xen_net_read_mac(dev, netdev->dev_addr);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
                goto fail;
        }

        txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
        if (!txs) {
                err = -ENOMEM;
                xenbus_dev_fatal(dev, err, "allocating tx ring page");
                goto fail;
        }
        SHARED_RING_INIT(txs);
        FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

        err = xenbus_grant_ring(dev, virt_to_mfn(txs));
        if (err < 0) {
                free_page((unsigned long)txs);
                goto fail;
        }

        info->tx_ring_ref = err;
        rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
        if (!rxs) {
                err = -ENOMEM;
                xenbus_dev_fatal(dev, err, "allocating rx ring page");
                goto fail;
        }
        SHARED_RING_INIT(rxs);
        FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

        err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
        if (err < 0) {
                free_page((unsigned long)rxs);
                goto fail;
        }
        info->rx_ring_ref = err;

        err = xenbus_alloc_evtchn(dev, &info->evtchn);
        if (err)
                goto fail;

        err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
                                        IRQF_SAMPLE_RANDOM, netdev->name,
                                        netdev);
        if (err < 0)
                goto fail;
        netdev->irq = err;
        return 0;

fail:
        return err;
}

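/*
 * On success the frontend's xenstore directory ends up with entries
 * along these lines (ring references and event-channel port are
 * illustrative values):
 *
 *   tx-ring-ref       = "9"
 *   rx-ring-ref       = "10"
 *   event-channel     = "12"
 *   request-rx-copy   = "1"
 *   feature-rx-notify = "1"
 *   feature-sg        = "1"
 *   feature-gso-tcpv4 = "1"
 */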
/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
                           struct netfront_info *info)
{
        const char *message;
        struct xenbus_transaction xbt;
        int err;

        /* Create shared ring, alloc event channel. */
        err = setup_netfront(dev, info);
        if (err)
                goto out;

again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_dev_fatal(dev, err, "starting transaction");
                goto destroy_ring;
        }

        err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
                            info->tx_ring_ref);
        if (err) {
                message = "writing tx ring-ref";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
                            info->rx_ring_ref);
        if (err) {
                message = "writing rx ring-ref";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename,
                            "event-channel", "%u", info->evtchn);
        if (err) {
                message = "writing event-channel";
                goto abort_transaction;
        }

        err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1);
        if (err) {
                message = "writing request-rx-copy";
                goto abort_transaction;
        }

        err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
        if (err) {
                message = "writing feature-rx-notify";
                goto abort_transaction;
        }

        err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
        if (err) {
                message = "writing feature-sg";
                goto abort_transaction;
        }

        err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
        if (err) {
                message = "writing feature-gso-tcpv4";
                goto abort_transaction;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err) {
                if (err == -EAGAIN)
                        goto again;
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto destroy_ring;
        }

        return 0;

abort_transaction:
        xenbus_transaction_end(xbt, 1);
        xenbus_dev_fatal(dev, err, "%s", message);
destroy_ring:
        xennet_disconnect_backend(info);
out:
        return err;
}

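/*
 * ethtool offload toggles: a feature may only be switched on if the
 * backend advertises the matching "feature-*" node in its xenstore
 * directory; an absent node is treated as 0 (unsupported).  Disabling
 * scatter/gather also drops an enlarged MTU back to ETH_DATA_LEN.
 */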
static int xennet_set_sg(struct net_device *dev, u32 data)
{
        if (data) {
                struct netfront_info *np = netdev_priv(dev);
                int val;

                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
                                 "%d", &val) < 0)
                        val = 0;
                if (!val)
                        return -ENOSYS;
        } else if (dev->mtu > ETH_DATA_LEN)
                dev->mtu = ETH_DATA_LEN;

        return ethtool_op_set_sg(dev, data);
}

static int xennet_set_tso(struct net_device *dev, u32 data)
{
        if (data) {
                struct netfront_info *np = netdev_priv(dev);
                int val;

                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
                                 "feature-gso-tcpv4", "%d", &val) < 0)
                        val = 0;
                if (!val)
                        return -ENOSYS;
        }

        return ethtool_op_set_tso(dev, data);
}

static void xennet_set_features(struct net_device *dev)
{
        /* Turn off all GSO bits except ROBUST. */
        dev->features &= ~NETIF_F_GSO_MASK;
        dev->features |= NETIF_F_GSO_ROBUST;
        xennet_set_sg(dev, 0);

        /* We need checksum offload to enable scatter/gather and TSO. */
        if (!(dev->features & NETIF_F_IP_CSUM))
                return;

        if (!xennet_set_sg(dev, 1))
                xennet_set_tso(dev, 1);
}

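/*
 * (Re)establish the connection to the backend: verify it supports the
 * rx-copy receive path, renegotiate rings and features, requeue any RX
 * buffers that survived the disconnect, and finally kick the backend
 * in case requests were queued while the carrier was down.
 */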
static int xennet_connect(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        int i, requeue_idx, err;
        struct sk_buff *skb;
        grant_ref_t ref;
        struct xen_netif_rx_request *req;
        unsigned int feature_rx_copy;

        err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
                           "feature-rx-copy", "%u", &feature_rx_copy);
        if (err != 1)
                feature_rx_copy = 0;

        if (!feature_rx_copy) {
                dev_info(&dev->dev,
                         "backend does not support copying receive path\n");
                return -ENODEV;
        }

        err = talk_to_backend(np->xbdev, np);
        if (err)
                return err;

        xennet_set_features(dev);

        spin_lock_bh(&np->rx_lock);
        spin_lock_irq(&np->tx_lock);

        /* Step 1: Discard all pending TX packet fragments. */
        xennet_release_tx_bufs(np);

        /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
        for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
                if (!np->rx_skbs[i])
                        continue;

                skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
                ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
                req = RING_GET_REQUEST(&np->rx, requeue_idx);

                gnttab_grant_foreign_access_ref(
                        ref, np->xbdev->otherend_id,
                        pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->frags->page)),
                        0);
                req->gref = ref;
                req->id   = requeue_idx;

                requeue_idx++;
        }

        np->rx.req_prod_pvt = requeue_idx;

        /*
         * Step 3: All public and private state should now be sane.  Get
         * ready to start sending and receiving packets and give the driver
         * domain a kick because we've probably just requeued some
         * packets.
         */
        netif_carrier_on(np->netdev);
        notify_remote_via_irq(np->netdev->irq);
        xennet_tx_buf_gc(dev);
        xennet_alloc_rx_buffers(dev);

        spin_unlock_irq(&np->tx_lock);
        spin_unlock_bh(&np->rx_lock);

        return 0;
}

/*
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
                            enum xenbus_state backend_state)
{
        struct netfront_info *np = dev_get_drvdata(&dev->dev);
        struct net_device *netdev = np->netdev;

        dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

        switch (backend_state) {
        case XenbusStateInitialising:
        case XenbusStateInitialised:
        case XenbusStateConnected:
        case XenbusStateUnknown:
        case XenbusStateClosed:
                break;

        case XenbusStateInitWait:
                if (dev->state != XenbusStateInitialising)
                        break;
                if (xennet_connect(netdev) != 0)
                        break;
                xenbus_switch_state(dev, XenbusStateConnected);
                netif_notify_peers(netdev);
                break;

        case XenbusStateClosing:
                xenbus_frontend_closed(dev);
                break;
        }
}

static const struct ethtool_ops xennet_ethtool_ops = {
        .set_tx_csum = ethtool_op_set_tx_csum,
        .set_sg = xennet_set_sg,
        .set_tso = xennet_set_tso,
        .get_link = ethtool_op_get_link,
};

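/*
 * Per-device sysfs knobs for tuning the RX buffer target, published on
 * the net device itself, e.g. (illustrative path):
 *
 *   /sys/class/net/eth0/rxbuf_min
 *
 * rxbuf_min and rxbuf_max are writable by CAP_NET_ADMIN and clamped to
 * [RX_MIN_TARGET, RX_MAX_TARGET]; rxbuf_cur is read-only.
 */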
#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf_min(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        struct netfront_info *info = netdev_priv(netdev);

        return sprintf(buf, "%u\n", info->rx_min_target);
}

static ssize_t store_rxbuf_min(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t len)
{
        struct net_device *netdev = to_net_dev(dev);
        struct netfront_info *np = netdev_priv(netdev);
        char *endp;
        unsigned long target;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        target = simple_strtoul(buf, &endp, 0);
        if (endp == buf)
                return -EBADMSG;

        if (target < RX_MIN_TARGET)
                target = RX_MIN_TARGET;
        if (target > RX_MAX_TARGET)
                target = RX_MAX_TARGET;

        spin_lock_bh(&np->rx_lock);
        if (target > np->rx_max_target)
                np->rx_max_target = target;
        np->rx_min_target = target;
        if (target > np->rx_target)
                np->rx_target = target;

        xennet_alloc_rx_buffers(netdev);

        spin_unlock_bh(&np->rx_lock);
        return len;
}

static ssize_t show_rxbuf_max(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        struct netfront_info *info = netdev_priv(netdev);

        return sprintf(buf, "%u\n", info->rx_max_target);
}

static ssize_t store_rxbuf_max(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t len)
{
        struct net_device *netdev = to_net_dev(dev);
        struct netfront_info *np = netdev_priv(netdev);
        char *endp;
        unsigned long target;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        target = simple_strtoul(buf, &endp, 0);
        if (endp == buf)
                return -EBADMSG;

        if (target < RX_MIN_TARGET)
                target = RX_MIN_TARGET;
        if (target > RX_MAX_TARGET)
                target = RX_MAX_TARGET;

        spin_lock_bh(&np->rx_lock);
        if (target < np->rx_min_target)
                np->rx_min_target = target;
        np->rx_max_target = target;
        if (target < np->rx_target)
                np->rx_target = target;

        xennet_alloc_rx_buffers(netdev);

        spin_unlock_bh(&np->rx_lock);
        return len;
}

static ssize_t show_rxbuf_cur(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        struct netfront_info *info = netdev_priv(netdev);

        return sprintf(buf, "%u\n", info->rx_target);
}

static struct device_attribute xennet_attrs[] = {
        __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
        __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
        __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};

static int xennet_sysfs_addif(struct net_device *netdev)
{
        int i;
        int err;

        for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
                err = device_create_file(&netdev->dev,
                                         &xennet_attrs[i]);
                if (err)
                        goto fail;
        }
        return 0;

fail:
        while (--i >= 0)
                device_remove_file(&netdev->dev, &xennet_attrs[i]);
        return err;
}

static void xennet_sysfs_delif(struct net_device *netdev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
                device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */

static struct xenbus_device_id netfront_ids[] = {
        { "vif" },
        { "" }
};

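/*
 * Tear-down mirrors probe in reverse: unregister from the network
 * stack first so no new I/O can start, then drop the backend
 * connection and release the remaining driver resources.
 */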
static int __devexit xennet_remove(struct xenbus_device *dev)
{
        struct netfront_info *info = dev_get_drvdata(&dev->dev);

        dev_dbg(&dev->dev, "%s\n", dev->nodename);

        unregister_netdev(info->netdev);

        xennet_disconnect_backend(info);

        del_timer_sync(&info->rx_refill_timer);

        xennet_sysfs_delif(info->netdev);

        free_netdev(info->netdev);

        return 0;
}

static struct xenbus_driver netfront_driver = {
        .name = "vif",
        .owner = THIS_MODULE,
        .ids = netfront_ids,
        .probe = netfront_probe,
        .remove = __devexit_p(xennet_remove),
        .resume = netfront_resume,
        .otherend_changed = backend_changed,
};

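/*
 * Module init/exit.  The frontend is only useful in a Xen guest: bail
 * out when not running under Xen at all, and register nothing in the
 * initial domain (which hosts the backend), where returning 0 keeps
 * the module loadable without binding any devices.
 */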
static int __init netif_init(void)
{
        if (!xen_domain())
                return -ENODEV;

        if (xen_initial_domain())
                return 0;

        printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

        return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);

static void __exit netif_exit(void)
{
        if (xen_initial_domain())
                return;

        xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");