/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */


#include "vmxnet3_int.h"

struct vmxnet3_stat_desc {
	char desc[ETH_GSTRING_LEN];
	int offset;
};

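/*
 * Each vmxnet3_stat_desc pairs an ethtool string with the byte offset of the
 * corresponding 64-bit counter, either in the device-maintained
 * UPT1_TxStats/UPT1_RxStats blocks or in the driver's own per-queue and
 * global structures. vmxnet3_get_strings() walks the desc fields and
 * vmxnet3_get_ethtool_stats() walks the offsets in the same order, so the
 * tables below must stay in sync with both.
 */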

static u32
vmxnet3_get_rx_csum(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	return adapter->rxcsum;
}


static int
vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	if (adapter->rxcsum != val) {
		adapter->rxcsum = val;
		if (netif_running(netdev)) {
			if (val)
				adapter->shared->devRead.misc.uptFeatures |=
					UPT1_F_RXCSUM;
			else
				adapter->shared->devRead.misc.uptFeatures &=
					~UPT1_F_RXCSUM;

			spin_lock_irqsave(&adapter->cmd_lock, flags);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
		}
	}
	return 0;
}
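
/*
 * Note: the RX checksum toggle above and the LRO toggle in vmxnet3_set_flags()
 * share the same pattern - flip the UPT1_F_* bit in the shared devRead area,
 * then issue VMXNET3_CMD_UPDATE_FEATURE through the BAR1 command register
 * under cmd_lock so the device re-reads the feature bits.
 */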


/* per tq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_tq_dev_stats[] = {
	/* description, offset */
	{ "Tx Queue#",		0 },
	{ " TSO pkts tx",	offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
	{ " TSO bytes tx",	offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
	{ " ucast pkts tx",	offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
	{ " ucast bytes tx",	offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
	{ " mcast pkts tx",	offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
	{ " mcast bytes tx",	offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
	{ " bcast pkts tx",	offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
	{ " bcast bytes tx",	offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
	{ " pkts tx err",	offsetof(struct UPT1_TxStats, pktsTxError) },
	{ " pkts tx discard",	offsetof(struct UPT1_TxStats, pktsTxDiscard) },
};

/* per tq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_tq_driver_stats[] = {
	/* description, offset */
	{ " drv dropped tx total",	offsetof(struct vmxnet3_tq_driver_stats,
						 drop_total) },
	{ " too many frags",		offsetof(struct vmxnet3_tq_driver_stats,
						 drop_too_many_frags) },
	{ " giant hdr",			offsetof(struct vmxnet3_tq_driver_stats,
						 drop_oversized_hdr) },
	{ " hdr err",			offsetof(struct vmxnet3_tq_driver_stats,
						 drop_hdr_inspect_err) },
	{ " tso",			offsetof(struct vmxnet3_tq_driver_stats,
						 drop_tso) },
	{ " ring full",			offsetof(struct vmxnet3_tq_driver_stats,
						 tx_ring_full) },
	{ " pkts linearized",		offsetof(struct vmxnet3_tq_driver_stats,
						 linearized) },
	{ " hdr cloned",		offsetof(struct vmxnet3_tq_driver_stats,
						 copy_skb_header) },
	{ " giant hdr",			offsetof(struct vmxnet3_tq_driver_stats,
						 oversized_hdr) },
};

/* per rq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_rq_dev_stats[] = {
	{ "Rx Queue#",		0 },
	{ " LRO pkts rx",	offsetof(struct UPT1_RxStats, LROPktsRxOK) },
	{ " LRO byte rx",	offsetof(struct UPT1_RxStats, LROBytesRxOK) },
	{ " ucast pkts rx",	offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
	{ " ucast bytes rx",	offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
	{ " mcast pkts rx",	offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
	{ " mcast bytes rx",	offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
	{ " bcast pkts rx",	offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
	{ " bcast bytes rx",	offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
	{ " pkts rx OOB",	offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
	{ " pkts rx err",	offsetof(struct UPT1_RxStats, pktsRxError) },
};

/* per rq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_rq_driver_stats[] = {
	/* description, offset */
	{ " drv dropped rx total",	offsetof(struct vmxnet3_rq_driver_stats,
						 drop_total) },
	{ " err",			offsetof(struct vmxnet3_rq_driver_stats,
						 drop_err) },
	{ " fcs",			offsetof(struct vmxnet3_rq_driver_stats,
						 drop_fcs) },
	{ " rx buf alloc fail",		offsetof(struct vmxnet3_rq_driver_stats,
						 rx_buf_alloc_failure) },
};

/* global stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_global_stats[] = {
	/* description, offset */
	{ "tx timeout count",	offsetof(struct vmxnet3_adapter,
					 tx_timeout_count) }
};


struct net_device_stats *
vmxnet3_get_stats(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	struct vmxnet3_tq_driver_stats *drvTxStats;
	struct vmxnet3_rq_driver_stats *drvRxStats;
	struct UPT1_TxStats *devTxStats;
	struct UPT1_RxStats *devRxStats;
	struct net_device_stats *net_stats = &netdev->stats;
	unsigned long flags;
	int i;

	adapter = netdev_priv(netdev);

	/* Collect the dev stats into the shared area */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	memset(net_stats, 0, sizeof(*net_stats));
	for (i = 0; i < adapter->num_tx_queues; i++) {
		devTxStats = &adapter->tqd_start[i].stats;
		drvTxStats = &adapter->tx_queue[i].stats;
		net_stats->tx_packets += devTxStats->ucastPktsTxOK +
					 devTxStats->mcastPktsTxOK +
					 devTxStats->bcastPktsTxOK;
		net_stats->tx_bytes += devTxStats->ucastBytesTxOK +
				       devTxStats->mcastBytesTxOK +
				       devTxStats->bcastBytesTxOK;
		net_stats->tx_errors += devTxStats->pktsTxError;
		net_stats->tx_dropped += drvTxStats->drop_total;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		devRxStats = &adapter->rqd_start[i].stats;
		drvRxStats = &adapter->rx_queue[i].stats;
		net_stats->rx_packets += devRxStats->ucastPktsRxOK +
					 devRxStats->mcastPktsRxOK +
					 devRxStats->bcastPktsRxOK;

		net_stats->rx_bytes += devRxStats->ucastBytesRxOK +
				       devRxStats->mcastBytesRxOK +
				       devRxStats->bcastBytesRxOK;

		net_stats->rx_errors += devRxStats->pktsRxError;
		net_stats->rx_dropped += drvRxStats->drop_total;
		net_stats->multicast += devRxStats->mcastPktsRxOK;
	}
	return net_stats;
}

static int
vmxnet3_get_sset_count(struct net_device *netdev, int sset)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
			ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
		       adapter->num_tx_queues +
		       (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
			ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
		       adapter->num_rx_queues +
		       ARRAY_SIZE(vmxnet3_global_stats);
	default:
		return -EOPNOTSUPP;
	}
}
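
/*
 * Illustrative count, assuming a 2-TX/2-RX queue configuration: the TX tables
 * contribute (11 + 9) * 2 = 40 strings, the RX tables (11 + 4) * 2 = 30, plus
 * one global stat, so ETH_SS_STATS reports 71 entries.
 */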


/* Should be multiple of 4 */
#define NUM_TX_REGS	8
#define NUM_RX_REGS	12

static int
vmxnet3_get_regs_len(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
		adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
}
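
/*
 * Example, assuming a single TX and a single RX queue: the register dump is
 * (1 * NUM_TX_REGS + 1 * NUM_RX_REGS) * sizeof(u32) = (8 + 12) * 4 = 80 bytes,
 * matching the per-queue layout written by vmxnet3_get_regs() below.
 */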


static void
vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
	drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';

	strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
		sizeof(drvinfo->version));
	drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';

	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		ETHTOOL_BUSINFO_LEN);
	drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS);
	drvinfo->testinfo_len = 0;
	drvinfo->eedump_len = 0;
	drvinfo->regdump_len = vmxnet3_get_regs_len(netdev);
}


static void
vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	if (stringset == ETH_SS_STATS) {
		int i, j;
		for (j = 0; j < adapter->num_tx_queues; j++) {
			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
				memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
				       ETH_GSTRING_LEN);
				buf += ETH_GSTRING_LEN;
			}
			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
			     i++) {
				memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
				       ETH_GSTRING_LEN);
				buf += ETH_GSTRING_LEN;
			}
		}

		for (j = 0; j < adapter->num_rx_queues; j++) {
			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
				memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
				       ETH_GSTRING_LEN);
				buf += ETH_GSTRING_LEN;
			}
			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
			     i++) {
				memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
				       ETH_GSTRING_LEN);
				buf += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
			memcpy(buf, vmxnet3_global_stats[i].desc,
			       ETH_GSTRING_LEN);
			buf += ETH_GSTRING_LEN;
		}
	}
}

static int
vmxnet3_set_flags(struct net_device *netdev, u32 data)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
	u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
	unsigned long flags;

	if (data & ~ETH_FLAG_LRO)
		return -EOPNOTSUPP;

	if (lro_requested ^ lro_present) {
		/* toggle the LRO feature */
		netdev->features ^= NETIF_F_LRO;

		/* update hardware LRO capability accordingly */
		if (lro_requested)
			adapter->shared->devRead.misc.uptFeatures |=
							UPT1_F_LRO;
		else
			adapter->shared->devRead.misc.uptFeatures &=
							~UPT1_F_LRO;
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_FEATURE);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}
	return 0;
}

static void
vmxnet3_get_ethtool_stats(struct net_device *netdev,
			  struct ethtool_stats *stats, u64 *buf)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	u8 *base;
	int i;
	int j = 0;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	/* this does assume each counter is 64-bit wide */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		base = (u8 *)&adapter->tqd_start[j].stats;
		*buf++ = (u64)j;
		for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_tq_dev_stats[i].offset);

		base = (u8 *)&adapter->tx_queue[j].stats;
		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_tq_driver_stats[i].offset);
	}

	for (j = 0; j < adapter->num_rx_queues; j++) {
		base = (u8 *)&adapter->rqd_start[j].stats;
		*buf++ = (u64) j;
		for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_rq_dev_stats[i].offset);

		base = (u8 *)&adapter->rx_queue[j].stats;
		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
			*buf++ = *(u64 *)(base +
					  vmxnet3_rq_driver_stats[i].offset);
	}

	base = (u8 *)adapter;
	for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
		*buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
}


static void
vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *buf = p;
	int i = 0, j = 0;

	memset(p, 0, vmxnet3_get_regs_len(netdev));

	regs->version = 1;

	/* Update vmxnet3_get_regs_len if we want to dump more registers */

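	/*
	 * Dump layout produced below, all u32 values: for each TX queue -
	 * tx_ring next2fill, next2comp, gen, pad, comp_ring next2proc, gen,
	 * queue stopped flag, pad (NUM_TX_REGS = 8 words); for each RX queue -
	 * the same next2fill/next2comp/gen/pad group for rx_ring[0] and
	 * rx_ring[1], then comp_ring next2proc, gen and two pad words
	 * (NUM_RX_REGS = 12 words).
	 */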
	/* make each ring use multiple of 16 bytes */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
		buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
		buf[j++] = adapter->tx_queue[i].tx_ring.gen;
		buf[j++] = 0;

		buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
		buf[j++] = adapter->tx_queue[i].comp_ring.gen;
		buf[j++] = adapter->tx_queue[i].stopped;
		buf[j++] = 0;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
		buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
		buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
		buf[j++] = 0;

		buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
		buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
		buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
		buf[j++] = 0;

		buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
		buf[j++] = adapter->rx_queue[i].comp_ring.gen;
		buf[j++] = 0;
		buf[j++] = 0;
	}

}


static void
vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC;
	wol->wolopts = adapter->wol;
}


static int
vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST |
			    WAKE_MAGICSECURE)) {
		return -EOPNOTSUPP;
	}

	adapter->wol = wol->wolopts;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}


static int
vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full |
			  SUPPORTED_TP;
	ecmd->advertising = ADVERTISED_TP;
	ecmd->port = PORT_TP;
	ecmd->transceiver = XCVR_INTERNAL;

	if (adapter->link_speed) {
		ecmd->speed = adapter->link_speed;
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}
	return 0;
}


static void
vmxnet3_get_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *param)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
	param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
	param->rx_mini_max_pending = 0;
	param->rx_jumbo_max_pending = 0;

	param->rx_pending = adapter->rx_queue[0].rx_ring[0].size *
			    adapter->num_rx_queues;
	param->tx_pending = adapter->tx_queue[0].tx_ring.size *
			    adapter->num_tx_queues;
	param->rx_mini_pending = 0;
	param->rx_jumbo_pending = 0;
}


static int
vmxnet3_set_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *param)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 new_tx_ring_size, new_rx_ring_size;
	u32 sz;
	int err = 0;

	if (param->tx_pending == 0 || param->tx_pending >
						VMXNET3_TX_RING_MAX_SIZE)
		return -EINVAL;

	if (param->rx_pending == 0 || param->rx_pending >
						VMXNET3_RX_RING_MAX_SIZE)
		return -EINVAL;


	/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
	new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
			   ~VMXNET3_RING_SIZE_MASK;
	new_tx_ring_size = min_t(u32, new_tx_ring_size,
				 VMXNET3_TX_RING_MAX_SIZE);
	if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size %
						VMXNET3_RING_SIZE_ALIGN) != 0)
		return -EINVAL;

	/* ring0 has to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
	new_rx_ring_size = min_t(u32, new_rx_ring_size,
				 VMXNET3_RX_RING_MAX_SIZE / sz * sz);
	if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size %
							    sz) != 0)
		return -EINVAL;
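	/*
	 * Worked example with illustrative values (assuming
	 * VMXNET3_RING_SIZE_ALIGN is 32, i.e. VMXNET3_RING_SIZE_MASK is 31):
	 * tx_pending = 100 is rounded up to a 128-descriptor TX ring; with
	 * rx_buf_per_pkt == 1, rx_pending = 100 likewise becomes 128, while
	 * with rx_buf_per_pkt == 3 (sz == 96) it is rounded up to 192.
	 */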

	if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
	    new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
		return 0;
	}

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* recreate the rx queue and the tx queue based on the
		 * new sizes */
		vmxnet3_tq_destroy_all(adapter);
		vmxnet3_rq_destroy_all(adapter);

		err = vmxnet3_create_queues(adapter, new_tx_ring_size,
			new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);

		if (err) {
			/* failed, most likely because of OOM, try default
			 * size */
			printk(KERN_ERR "%s: failed to apply new sizes, try the"
			       " default ones\n", netdev->name);
			err = vmxnet3_create_queues(adapter,
						    VMXNET3_DEF_TX_RING_SIZE,
						    VMXNET3_DEF_RX_RING_SIZE,
						    VMXNET3_DEF_RX_RING_SIZE);
			if (err) {
				printk(KERN_ERR "%s: failed to create queues "
				       "with default sizes. Closing it\n",
				       netdev->name);
				goto out;
			}
		}

		err = vmxnet3_activate_dev(adapter);
		if (err)
			printk(KERN_ERR "%s: failed to re-activate, error %d."
			       " Closing it\n", netdev->name, err);
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}


static int
vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
		  void *rules)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_rx_queues;
		return 0;
	}
	return -EOPNOTSUPP;
}

#ifdef VMXNET3_RSS
static int
vmxnet3_get_rss_indir(struct net_device *netdev,
		      struct ethtool_rxfh_indir *p)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
	unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);

	p->size = rssConf->indTableSize;
	while (n--)
		p->ring_index[n] = rssConf->indTable[n];
	return 0;
}

static int
vmxnet3_set_rss_indir(struct net_device *netdev,
		      const struct ethtool_rxfh_indir *p)
{
	unsigned int i;
	unsigned long flags;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct UPT1_RSSConf *rssConf = adapter->rss_conf;

	if (p->size != rssConf->indTableSize)
		return -EINVAL;
	for (i = 0; i < rssConf->indTableSize; i++) {
		/*
		 * Return with error code if any of the queue indices
		 * is out of range
		 */
		if (p->ring_index[i] < 0 ||
		    p->ring_index[i] >= adapter->num_rx_queues)
			return -EINVAL;
	}

	for (i = 0; i < rssConf->indTableSize; i++)
		rssConf->indTable[i] = p->ring_index[i];

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_RSSIDT);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	return 0;
}
#endif
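
/*
 * The RSS indirection table maps the low bits of the packet hash to an RX
 * queue index. As a purely illustrative example, a 32-entry table on a
 * 4-queue adapter programmed to {0, 1, 2, 3, 0, 1, ...} spreads flows evenly
 * across the queues; userspace drives this through the ETHTOOL_GRXFHINDIR /
 * ETHTOOL_SRXFHINDIR ioctls (ethtool -x / -X), which land in the two
 * handlers above.
 */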

static struct ethtool_ops vmxnet3_ethtool_ops = {
	.get_settings      = vmxnet3_get_settings,
	.get_drvinfo       = vmxnet3_get_drvinfo,
	.get_regs_len      = vmxnet3_get_regs_len,
	.get_regs          = vmxnet3_get_regs,
	.get_wol           = vmxnet3_get_wol,
	.set_wol           = vmxnet3_set_wol,
	.get_link          = ethtool_op_get_link,
	.get_rx_csum       = vmxnet3_get_rx_csum,
	.set_rx_csum       = vmxnet3_set_rx_csum,
	.get_tx_csum       = ethtool_op_get_tx_csum,
	.set_tx_csum       = ethtool_op_set_tx_hw_csum,
	.get_sg            = ethtool_op_get_sg,
	.set_sg            = ethtool_op_set_sg,
	.get_tso           = ethtool_op_get_tso,
	.set_tso           = ethtool_op_set_tso,
	.get_strings       = vmxnet3_get_strings,
	.get_flags         = ethtool_op_get_flags,
	.set_flags         = vmxnet3_set_flags,
	.get_sset_count    = vmxnet3_get_sset_count,
	.get_ethtool_stats = vmxnet3_get_ethtool_stats,
	.get_ringparam     = vmxnet3_get_ringparam,
	.set_ringparam     = vmxnet3_set_ringparam,
	.get_rxnfc         = vmxnet3_get_rxnfc,
#ifdef VMXNET3_RSS
	.get_rxfh_indir    = vmxnet3_get_rss_indir,
	.set_rxfh_indir    = vmxnet3_set_rss_indir,
#endif
};

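/*
 * Assumed usage (based on this driver's probe path in vmxnet3_drv.c): called
 * once per net_device before register_netdev(), e.g.
 *
 *	vmxnet3_set_ethtool_ops(netdev);
 *	err = register_netdev(netdev);
 */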
void vmxnet3_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops);
}