| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1 | /* | 
|  | 2 | * Copyright (C) 2005 - 2009 ServerEngines | 
|  | 3 | * All rights reserved. | 
|  | 4 | * | 
|  | 5 | * This program is free software; you can redistribute it and/or | 
|  | 6 | * modify it under the terms of the GNU General Public License version 2 | 
|  | 7 | * as published by the Free Software Foundation.  The full GNU General | 
|  | 8 | * Public License is included in this distribution in the file called COPYING. | 
|  | 9 | * | 
|  | 10 | * Contact Information: | 
|  | 11 | * linux-drivers@serverengines.com | 
|  | 12 | * | 
|  | 13 | * ServerEngines | 
|  | 14 | * 209 N. Fair Oaks Ave | 
|  | 15 | * Sunnyvale, CA 94085 | 
|  | 16 | */ | 
|  | 17 |  | 
#include "be.h"
#include "be_cmds.h"
#include <linux/etherdevice.h>
#include <asm/div64.h>
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 21 |  | 
|  | 22 | MODULE_VERSION(DRV_VER); | 
|  | 23 | MODULE_DEVICE_TABLE(pci, be_dev_ids); | 
|  | 24 | MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); | 
|  | 25 | MODULE_AUTHOR("ServerEngines Corporation"); | 
|  | 26 | MODULE_LICENSE("GPL"); | 
|  | 27 |  | 
|  | 28 | static unsigned int rx_frag_size = 2048; | 
|  | 29 | module_param(rx_frag_size, uint, S_IRUGO); | 
|  | 30 | MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); | 
|  | 31 |  | 
/* PCI functions claimed by this driver: BladeEngine and OneConnect IDs */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
|  | 39 |  | 
|  | 40 | static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) | 
|  | 41 | { | 
|  | 42 | struct be_dma_mem *mem = &q->dma_mem; | 
|  | 43 | if (mem->va) | 
|  | 44 | pci_free_consistent(adapter->pdev, mem->size, | 
|  | 45 | mem->va, mem->dma); | 
|  | 46 | } | 
|  | 47 |  | 
|  | 48 | static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, | 
|  | 49 | u16 len, u16 entry_size) | 
|  | 50 | { | 
|  | 51 | struct be_dma_mem *mem = &q->dma_mem; | 
|  | 52 |  | 
|  | 53 | memset(q, 0, sizeof(*q)); | 
|  | 54 | q->len = len; | 
|  | 55 | q->entry_size = entry_size; | 
|  | 56 | mem->size = len * entry_size; | 
|  | 57 | mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma); | 
|  | 58 | if (!mem->va) | 
|  | 59 | return -1; | 
|  | 60 | memset(mem->va, 0, mem->size); | 
|  | 61 | return 0; | 
|  | 62 | } | 
|  | 63 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 64 | static void be_intr_set(struct be_adapter *adapter, bool enable) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 65 | { | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 66 | u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 67 | u32 reg = ioread32(addr); | 
|  | 68 | u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; | 
| Sathya Perla | 5f0b849 | 2009-07-27 22:52:56 +0000 | [diff] [blame] | 69 |  | 
|  | 70 | if (!enabled && enable) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 71 | reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; | 
| Sathya Perla | 5f0b849 | 2009-07-27 22:52:56 +0000 | [diff] [blame] | 72 | else if (enabled && !enable) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 73 | reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; | 
| Sathya Perla | 5f0b849 | 2009-07-27 22:52:56 +0000 | [diff] [blame] | 74 | else | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 75 | return; | 
| Sathya Perla | 5f0b849 | 2009-07-27 22:52:56 +0000 | [diff] [blame] | 76 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 77 | iowrite32(reg, addr); | 
|  | 78 | } | 
|  | 79 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 80 | static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 81 | { | 
|  | 82 | u32 val = 0; | 
|  | 83 | val |= qid & DB_RQ_RING_ID_MASK; | 
|  | 84 | val |= posted << DB_RQ_NUM_POSTED_SHIFT; | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 85 | iowrite32(val, adapter->db + DB_RQ_OFFSET); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 86 | } | 
|  | 87 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 88 | static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 89 | { | 
|  | 90 | u32 val = 0; | 
|  | 91 | val |= qid & DB_TXULP_RING_ID_MASK; | 
|  | 92 | val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT; | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 93 | iowrite32(val, adapter->db + DB_TXULP1_OFFSET); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 94 | } | 
|  | 95 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 96 | static void be_eq_notify(struct be_adapter *adapter, u16 qid, | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 97 | bool arm, bool clear_int, u16 num_popped) | 
|  | 98 | { | 
|  | 99 | u32 val = 0; | 
|  | 100 | val |= qid & DB_EQ_RING_ID_MASK; | 
|  | 101 | if (arm) | 
|  | 102 | val |= 1 << DB_EQ_REARM_SHIFT; | 
|  | 103 | if (clear_int) | 
|  | 104 | val |= 1 << DB_EQ_CLR_SHIFT; | 
|  | 105 | val |= 1 << DB_EQ_EVNT_SHIFT; | 
|  | 106 | val |= num_popped << DB_EQ_NUM_POPPED_SHIFT; | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 107 | iowrite32(val, adapter->db + DB_EQ_OFFSET); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 108 | } | 
|  | 109 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 110 | void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 111 | { | 
|  | 112 | u32 val = 0; | 
|  | 113 | val |= qid & DB_CQ_RING_ID_MASK; | 
|  | 114 | if (arm) | 
|  | 115 | val |= 1 << DB_CQ_REARM_SHIFT; | 
|  | 116 | val |= num_popped << DB_CQ_NUM_POPPED_SHIFT; | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 117 | iowrite32(val, adapter->db + DB_CQ_OFFSET); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 118 | } | 
|  | 119 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 120 | static int be_mac_addr_set(struct net_device *netdev, void *p) | 
|  | 121 | { | 
|  | 122 | struct be_adapter *adapter = netdev_priv(netdev); | 
|  | 123 | struct sockaddr *addr = p; | 
|  | 124 | int status = 0; | 
|  | 125 |  | 
| Sathya Perla | a65027e | 2009-08-17 00:58:04 +0000 | [diff] [blame] | 126 | status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id); | 
|  | 127 | if (status) | 
|  | 128 | return status; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 129 |  | 
| Sathya Perla | a65027e | 2009-08-17 00:58:04 +0000 | [diff] [blame] | 130 | status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, | 
|  | 131 | adapter->if_handle, &adapter->pmac_id); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 132 | if (!status) | 
|  | 133 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 
|  | 134 |  | 
|  | 135 | return status; | 
|  | 136 | } | 
|  | 137 |  | 
/*
 * Copy the latest hw stats block (DMAed into adapter->stats.cmd by a
 * firmware stats query) into the netdev-visible net_device_stats.
 */
static void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	/* counters for the port this PCI function owns */
	struct be_port_rxf_stats *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->stats.net_stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;

	dev_stats->rx_packets = port_stats->rx_total_frames;
	dev_stats->tx_packets = port_stats->tx_unicastframes +
		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
	/* hw splits 64-bit byte counters into msd/lsd 32-bit halves */
	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
				(u64) port_stats->rx_bytes_lsd;
	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
				(u64) port_stats->tx_bytes_lsd;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* no space in linux buffers: best possible approximation */
	dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	/* NOTE(review): 'multicast' conventionally counts multicast frames
	 * *received*, but a tx counter is used here -- verify against the
	 * hw stats layout */
	dev_stats->multicast = port_stats->tx_multicastframes;
	dev_stats->collisions = 0;

	/* detailed tx_errors: none of these are reported by this hw */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}
|  | 210 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 211 | void be_link_status_update(struct be_adapter *adapter, bool link_up) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 212 | { | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 213 | struct net_device *netdev = adapter->netdev; | 
|  | 214 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 215 | /* If link came up or went down */ | 
| Sathya Perla | a8f447b | 2009-06-18 00:10:27 +0000 | [diff] [blame] | 216 | if (adapter->link_up != link_up) { | 
|  | 217 | if (link_up) { | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 218 | netif_start_queue(netdev); | 
|  | 219 | netif_carrier_on(netdev); | 
|  | 220 | printk(KERN_INFO "%s: Link up\n", netdev->name); | 
| Sathya Perla | a8f447b | 2009-06-18 00:10:27 +0000 | [diff] [blame] | 221 | } else { | 
|  | 222 | netif_stop_queue(netdev); | 
|  | 223 | netif_carrier_off(netdev); | 
|  | 224 | printk(KERN_INFO "%s: Link down\n", netdev->name); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 225 | } | 
| Sathya Perla | a8f447b | 2009-06-18 00:10:27 +0000 | [diff] [blame] | 226 | adapter->link_up = link_up; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 227 | } | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 228 | } | 
|  | 229 |  | 
/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	/* adaptive interrupt coalescing disabled for this EQ */
	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around: resync the timestamp and sample next time */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	/* rx frags per second since the last sample */
	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	/* map the frag rate to an EQ delay; the 110000 divisor and <<3
	 * scaling appear empirically tuned -- TODO confirm vs hw docs */
	eqd = stats->be_rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	/* at very low rates disable the delay entirely for latency */
	if (eqd < 10)
		eqd = 0;
	/* only issue the (mailbox) command when the delay changes */
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
|  | 269 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 270 | static struct net_device_stats *be_get_stats(struct net_device *dev) | 
|  | 271 | { | 
|  | 272 | struct be_adapter *adapter = netdev_priv(dev); | 
|  | 273 |  | 
|  | 274 | return &adapter->stats.net_stats; | 
|  | 275 | } | 
|  | 276 |  | 
| Stephen Hemminger | 65f71b8 | 2009-03-27 00:25:24 -0700 | [diff] [blame] | 277 | static u32 be_calc_rate(u64 bytes, unsigned long ticks) | 
|  | 278 | { | 
|  | 279 | u64 rate = bytes; | 
|  | 280 |  | 
|  | 281 | do_div(rate, ticks / HZ); | 
|  | 282 | rate <<= 3;			/* bytes/sec -> bits/sec */ | 
|  | 283 | do_div(rate, 1000000ul);	/* MB/Sec */ | 
|  | 284 |  | 
|  | 285 | return rate; | 
|  | 286 | } | 
|  | 287 |  | 
| Sathya Perla | 4097f66 | 2009-03-24 16:40:13 -0700 | [diff] [blame] | 288 | static void be_tx_rate_update(struct be_adapter *adapter) | 
|  | 289 | { | 
|  | 290 | struct be_drvr_stats *stats = drvr_stats(adapter); | 
|  | 291 | ulong now = jiffies; | 
|  | 292 |  | 
|  | 293 | /* Wrapped around? */ | 
|  | 294 | if (time_before(now, stats->be_tx_jiffies)) { | 
|  | 295 | stats->be_tx_jiffies = now; | 
|  | 296 | return; | 
|  | 297 | } | 
|  | 298 |  | 
|  | 299 | /* Update tx rate once in two seconds */ | 
|  | 300 | if ((now - stats->be_tx_jiffies) > 2 * HZ) { | 
| Stephen Hemminger | 65f71b8 | 2009-03-27 00:25:24 -0700 | [diff] [blame] | 301 | stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes | 
|  | 302 | - stats->be_tx_bytes_prev, | 
|  | 303 | now - stats->be_tx_jiffies); | 
| Sathya Perla | 4097f66 | 2009-03-24 16:40:13 -0700 | [diff] [blame] | 304 | stats->be_tx_jiffies = now; | 
|  | 305 | stats->be_tx_bytes_prev = stats->be_tx_bytes; | 
|  | 306 | } | 
|  | 307 | } | 
|  | 308 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 309 | static void be_tx_stats_update(struct be_adapter *adapter, | 
|  | 310 | u32 wrb_cnt, u32 copied, bool stopped) | 
|  | 311 | { | 
| Sathya Perla | 4097f66 | 2009-03-24 16:40:13 -0700 | [diff] [blame] | 312 | struct be_drvr_stats *stats = drvr_stats(adapter); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 313 | stats->be_tx_reqs++; | 
|  | 314 | stats->be_tx_wrbs += wrb_cnt; | 
|  | 315 | stats->be_tx_bytes += copied; | 
|  | 316 | if (stopped) | 
|  | 317 | stats->be_tx_stops++; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 318 | } | 
|  | 319 |  | 
|  | 320 | /* Determine number of WRB entries needed to xmit data in an skb */ | 
|  | 321 | static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy) | 
|  | 322 | { | 
| David S. Miller | ebc8d2a | 2009-06-09 01:01:31 -0700 | [diff] [blame] | 323 | int cnt = (skb->len > skb->data_len); | 
|  | 324 |  | 
|  | 325 | cnt += skb_shinfo(skb)->nr_frags; | 
|  | 326 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 327 | /* to account for hdr wrb */ | 
|  | 328 | cnt++; | 
|  | 329 | if (cnt & 1) { | 
|  | 330 | /* add a dummy to make it an even num */ | 
|  | 331 | cnt++; | 
|  | 332 | *dummy = true; | 
|  | 333 | } else | 
|  | 334 | *dummy = false; | 
|  | 335 | BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT); | 
|  | 336 | return cnt; | 
|  | 337 | } | 
|  | 338 |  | 
|  | 339 | static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len) | 
|  | 340 | { | 
|  | 341 | wrb->frag_pa_hi = upper_32_bits(addr); | 
|  | 342 | wrb->frag_pa_lo = addr & 0xFFFFFFFF; | 
|  | 343 | wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK; | 
|  | 344 | } | 
|  | 345 |  | 
/*
 * Fill the per-packet tx header wrb: crc/LSO/checksum/vlan flags plus
 * the total wrb count and payload length for this transmit request.
 */
static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	/* LSO takes precedence over plain checksum offload */
	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	/* insert the vlan tag only when the skb actually carries one */
	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
|  | 375 |  | 
|  | 376 |  | 
/*
 * Map 'skb' into the tx queue as header + fragment wrbs.  The caller has
 * computed wrb_cnt via wrb_cnt_for_skb(), so the whole group is reserved
 * up-front.  Returns the number of payload bytes mapped.
 */
static int make_tx_wrbs(struct be_adapter *adapter,
	struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	u64 busaddr;
	u32 i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	atomic_add(wrb_cnt, &txq->used);
	/* reserve the first slot for the header wrb; it is filled last,
	 * once the total mapped length is known */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);

	/* linear (non-paged) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb->len - skb->data_len;
		/* NOTE(review): pci_map_single/pci_map_page results are not
		 * checked with pci_dma_mapping_error -- confirm mapping
		 * failures are impossible or handled elsewhere */
		busaddr = pci_map_single(pdev, skb->data, len,
			PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one wrb per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
			frag->page_offset,
			frag->size, PCI_DMA_TODEVICE);
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	/* pad to an even wrb count with a zero-length dummy */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	/* request vlan insertion only when a vlan group is registered */
	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
}
|  | 429 |  | 
/*
 * ndo_start_xmit: map the skb into tx wrbs, ring the tx doorbell and
 * account driver stats.  Always consumes the skb (returns NETDEV_TX_OK);
 * the skb is freed later from the tx-completion path via sent_skb_list.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* index of the hdr wrb == skb's slot */
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);

	/* record the sent skb in the sent_skb table */
	BUG_ON(tx_obj->sent_skb_list[start]);
	tx_obj->sent_skb_list[start] = skb;

	/* Ensure that txq has space for the next skb; Else stop the queue
	 * *BEFORE* ringing the tx doorbell, so that we serialize the
	 * tx compls of the current transmit which'll wake up the queue
	 */
	if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
		netif_stop_queue(netdev);
		stopped = true;
	}

	be_txq_notify(adapter, txq->id, wrb_cnt);

	be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
	return NETDEV_TX_OK;
}
|  | 462 |  | 
|  | 463 | static int be_change_mtu(struct net_device *netdev, int new_mtu) | 
|  | 464 | { | 
|  | 465 | struct be_adapter *adapter = netdev_priv(netdev); | 
|  | 466 | if (new_mtu < BE_MIN_MTU || | 
|  | 467 | new_mtu > BE_MAX_JUMBO_FRAME_SIZE) { | 
|  | 468 | dev_info(&adapter->pdev->dev, | 
|  | 469 | "MTU must be between %d and %d bytes\n", | 
|  | 470 | BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE); | 
|  | 471 | return -EINVAL; | 
|  | 472 | } | 
|  | 473 | dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n", | 
|  | 474 | netdev->mtu, new_mtu); | 
|  | 475 | netdev->mtu = new_mtu; | 
|  | 476 | return 0; | 
|  | 477 | } | 
|  | 478 |  | 
|  | 479 | /* | 
|  | 480 | * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured, | 
|  | 481 | * program them in BE.  If more than BE_NUM_VLANS_SUPPORTED are configured, | 
|  | 482 | * set the BE in promiscuous VLAN mode. | 
|  | 483 | */ | 
| Sathya Perla | 1ab1ab7 | 2009-03-19 23:56:46 -0700 | [diff] [blame] | 484 | static void be_vid_config(struct net_device *netdev) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 485 | { | 
|  | 486 | struct be_adapter *adapter = netdev_priv(netdev); | 
|  | 487 | u16 vtag[BE_NUM_VLANS_SUPPORTED]; | 
|  | 488 | u16 ntags = 0, i; | 
|  | 489 |  | 
|  | 490 | if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED)  { | 
|  | 491 | /* Construct VLAN Table to give to HW */ | 
|  | 492 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { | 
|  | 493 | if (adapter->vlan_tag[i]) { | 
|  | 494 | vtag[ntags] = cpu_to_le16(i); | 
|  | 495 | ntags++; | 
|  | 496 | } | 
|  | 497 | } | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 498 | be_cmd_vlan_config(adapter, adapter->if_handle, | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 499 | vtag, ntags, 1, 0); | 
|  | 500 | } else { | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 501 | be_cmd_vlan_config(adapter, adapter->if_handle, | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 502 | NULL, 0, 1, 1); | 
|  | 503 | } | 
|  | 504 | } | 
|  | 505 |  | 
|  | 506 | static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp) | 
|  | 507 | { | 
|  | 508 | struct be_adapter *adapter = netdev_priv(netdev); | 
|  | 509 | struct be_eq_obj *rx_eq = &adapter->rx_eq; | 
|  | 510 | struct be_eq_obj *tx_eq = &adapter->tx_eq; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 511 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 512 | be_eq_notify(adapter, rx_eq->q.id, false, false, 0); | 
|  | 513 | be_eq_notify(adapter, tx_eq->q.id, false, false, 0); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 514 | adapter->vlan_grp = grp; | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 515 | be_eq_notify(adapter, rx_eq->q.id, true, false, 0); | 
|  | 516 | be_eq_notify(adapter, tx_eq->q.id, true, false, 0); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 517 | } | 
|  | 518 |  | 
|  | 519 | static void be_vlan_add_vid(struct net_device *netdev, u16 vid) | 
|  | 520 | { | 
|  | 521 | struct be_adapter *adapter = netdev_priv(netdev); | 
|  | 522 |  | 
|  | 523 | adapter->num_vlans++; | 
|  | 524 | adapter->vlan_tag[vid] = 1; | 
|  | 525 |  | 
| Sathya Perla | 1ab1ab7 | 2009-03-19 23:56:46 -0700 | [diff] [blame] | 526 | be_vid_config(netdev); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 527 | } | 
|  | 528 |  | 
|  | 529 | static void be_vlan_rem_vid(struct net_device *netdev, u16 vid) | 
|  | 530 | { | 
|  | 531 | struct be_adapter *adapter = netdev_priv(netdev); | 
|  | 532 |  | 
|  | 533 | adapter->num_vlans--; | 
|  | 534 | adapter->vlan_tag[vid] = 0; | 
|  | 535 |  | 
|  | 536 | vlan_group_set_device(adapter->vlan_grp, vid, NULL); | 
| Sathya Perla | 1ab1ab7 | 2009-03-19 23:56:46 -0700 | [diff] [blame] | 537 | be_vid_config(netdev); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 538 | } | 
|  | 539 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 540 | static void be_set_multicast_list(struct net_device *netdev) | 
|  | 541 | { | 
|  | 542 | struct be_adapter *adapter = netdev_priv(netdev); | 
|  | 543 |  | 
|  | 544 | if (netdev->flags & IFF_PROMISC) { | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 545 | be_cmd_promiscuous_config(adapter, adapter->port_num, 1); | 
| Sathya Perla | 24307ee | 2009-06-18 00:09:25 +0000 | [diff] [blame] | 546 | adapter->promiscuous = true; | 
|  | 547 | goto done; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 548 | } | 
| Sathya Perla | 24307ee | 2009-06-18 00:09:25 +0000 | [diff] [blame] | 549 |  | 
|  | 550 | /* BE was previously in promiscous mode; disable it */ | 
|  | 551 | if (adapter->promiscuous) { | 
|  | 552 | adapter->promiscuous = false; | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 553 | be_cmd_promiscuous_config(adapter, adapter->port_num, 0); | 
| Sathya Perla | 24307ee | 2009-06-18 00:09:25 +0000 | [diff] [blame] | 554 | } | 
|  | 555 |  | 
|  | 556 | if (netdev->flags & IFF_ALLMULTI) { | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 557 | be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0); | 
| Sathya Perla | 24307ee | 2009-06-18 00:09:25 +0000 | [diff] [blame] | 558 | goto done; | 
|  | 559 | } | 
|  | 560 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 561 | be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list, | 
| Sathya Perla | 24307ee | 2009-06-18 00:09:25 +0000 | [diff] [blame] | 562 | netdev->mc_count); | 
|  | 563 | done: | 
|  | 564 | return; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 565 | } | 
|  | 566 |  | 
| Sathya Perla | 4097f66 | 2009-03-24 16:40:13 -0700 | [diff] [blame] | 567 | static void be_rx_rate_update(struct be_adapter *adapter) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 568 | { | 
| Sathya Perla | 4097f66 | 2009-03-24 16:40:13 -0700 | [diff] [blame] | 569 | struct be_drvr_stats *stats = drvr_stats(adapter); | 
|  | 570 | ulong now = jiffies; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 571 |  | 
| Sathya Perla | 4097f66 | 2009-03-24 16:40:13 -0700 | [diff] [blame] | 572 | /* Wrapped around */ | 
|  | 573 | if (time_before(now, stats->be_rx_jiffies)) { | 
|  | 574 | stats->be_rx_jiffies = now; | 
|  | 575 | return; | 
|  | 576 | } | 
|  | 577 |  | 
|  | 578 | /* Update the rate once in two seconds */ | 
|  | 579 | if ((now - stats->be_rx_jiffies) < 2 * HZ) | 
|  | 580 | return; | 
|  | 581 |  | 
| Stephen Hemminger | 65f71b8 | 2009-03-27 00:25:24 -0700 | [diff] [blame] | 582 | stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes | 
|  | 583 | - stats->be_rx_bytes_prev, | 
|  | 584 | now - stats->be_rx_jiffies); | 
| Sathya Perla | 4097f66 | 2009-03-24 16:40:13 -0700 | [diff] [blame] | 585 | stats->be_rx_jiffies = now; | 
|  | 586 | stats->be_rx_bytes_prev = stats->be_rx_bytes; | 
|  | 587 | } | 
|  | 588 |  | 
|  | 589 | static void be_rx_stats_update(struct be_adapter *adapter, | 
|  | 590 | u32 pktsize, u16 numfrags) | 
|  | 591 | { | 
|  | 592 | struct be_drvr_stats *stats = drvr_stats(adapter); | 
|  | 593 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 594 | stats->be_rx_compl++; | 
|  | 595 | stats->be_rx_frags += numfrags; | 
|  | 596 | stats->be_rx_bytes += pktsize; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 597 | } | 
|  | 598 |  | 
| Ajit Khaparde | 728a997 | 2009-04-13 15:41:22 -0700 | [diff] [blame] | 599 | static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) | 
|  | 600 | { | 
|  | 601 | u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk; | 
|  | 602 |  | 
|  | 603 | l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp); | 
|  | 604 | ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp); | 
|  | 605 | ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp); | 
|  | 606 | if (ip_version) { | 
|  | 607 | tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); | 
|  | 608 | udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp); | 
|  | 609 | } | 
|  | 610 | ipv6_chk = (ip_version && (tcpf || udpf)); | 
|  | 611 |  | 
|  | 612 | return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true; | 
|  | 613 | } | 
|  | 614 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 615 | static struct be_rx_page_info * | 
|  | 616 | get_rx_page_info(struct be_adapter *adapter, u16 frag_idx) | 
|  | 617 | { | 
|  | 618 | struct be_rx_page_info *rx_page_info; | 
|  | 619 | struct be_queue_info *rxq = &adapter->rx_obj.q; | 
|  | 620 |  | 
|  | 621 | rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx]; | 
|  | 622 | BUG_ON(!rx_page_info->page); | 
|  | 623 |  | 
|  | 624 | if (rx_page_info->last_page_user) | 
|  | 625 | pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus), | 
|  | 626 | adapter->big_page_size, PCI_DMA_FROMDEVICE); | 
|  | 627 |  | 
|  | 628 | atomic_dec(&rxq->used); | 
|  | 629 | return rx_page_info; | 
|  | 630 | } | 
|  | 631 |  | 
|  | 632 | /* Throwaway the data in the Rx completion */ | 
|  | 633 | static void be_rx_compl_discard(struct be_adapter *adapter, | 
|  | 634 | struct be_eth_rx_compl *rxcp) | 
|  | 635 | { | 
|  | 636 | struct be_queue_info *rxq = &adapter->rx_obj.q; | 
|  | 637 | struct be_rx_page_info *page_info; | 
|  | 638 | u16 rxq_idx, i, num_rcvd; | 
|  | 639 |  | 
|  | 640 | rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); | 
|  | 641 | num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); | 
|  | 642 |  | 
|  | 643 | for (i = 0; i < num_rcvd; i++) { | 
|  | 644 | page_info = get_rx_page_info(adapter, rxq_idx); | 
|  | 645 | put_page(page_info->page); | 
|  | 646 | memset(page_info, 0, sizeof(*page_info)); | 
|  | 647 | index_inc(&rxq_idx, rxq->len); | 
|  | 648 | } | 
|  | 649 | } | 
|  | 650 |  | 
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first min(BE_HDR_LEN, frag len) bytes are copied into the skb's
 * linear area; the remainder of the frame is attached as page frags.
 * Consecutive rx frags carved from the same physical page are coalesced
 * into one skb frag slot.  Driver rx stats are updated on all paths.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
		struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data; the page
		 * is no longer referenced by the skb */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Rest of the first frag stays in the page; attach it as
		 * skb frag 0, offset past the copied header */
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	memset(page_info, 0, sizeof(*page_info));

	if (pktsize <= rx_frag_size) {
		/* Single-frag frame must report exactly one frag */
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;	/* bytes of the frame not yet accounted */
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
					page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra
			 * reference taken when the frag was posted */
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd);
	return;
}
|  | 735 |  | 
| Ajit Khaparde | 5be93b9 | 2009-07-21 12:36:19 -0700 | [diff] [blame] | 736 | /* Process the RX completion indicated by rxcp when GRO is disabled */ | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 737 | static void be_rx_compl_process(struct be_adapter *adapter, | 
|  | 738 | struct be_eth_rx_compl *rxcp) | 
|  | 739 | { | 
|  | 740 | struct sk_buff *skb; | 
|  | 741 | u32 vtp, vid; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 742 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 743 | vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); | 
|  | 744 |  | 
|  | 745 | skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN); | 
|  | 746 | if (!skb) { | 
|  | 747 | if (net_ratelimit()) | 
|  | 748 | dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); | 
|  | 749 | be_rx_compl_discard(adapter, rxcp); | 
|  | 750 | return; | 
|  | 751 | } | 
|  | 752 |  | 
|  | 753 | skb_reserve(skb, NET_IP_ALIGN); | 
|  | 754 |  | 
|  | 755 | skb_fill_rx_data(adapter, skb, rxcp); | 
|  | 756 |  | 
| Ajit Khaparde | 728a997 | 2009-04-13 15:41:22 -0700 | [diff] [blame] | 757 | if (do_pkt_csum(rxcp, adapter->rx_csum)) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 758 | skb->ip_summed = CHECKSUM_NONE; | 
| Ajit Khaparde | 728a997 | 2009-04-13 15:41:22 -0700 | [diff] [blame] | 759 | else | 
|  | 760 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 761 |  | 
|  | 762 | skb->truesize = skb->len + sizeof(struct sk_buff); | 
|  | 763 | skb->protocol = eth_type_trans(skb, adapter->netdev); | 
|  | 764 | skb->dev = adapter->netdev; | 
|  | 765 |  | 
|  | 766 | if (vtp) { | 
|  | 767 | if (!adapter->vlan_grp || adapter->num_vlans == 0) { | 
|  | 768 | kfree_skb(skb); | 
|  | 769 | return; | 
|  | 770 | } | 
|  | 771 | vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); | 
|  | 772 | vid = be16_to_cpu(vid); | 
|  | 773 | vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); | 
|  | 774 | } else { | 
|  | 775 | netif_receive_skb(skb); | 
|  | 776 | } | 
|  | 777 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 778 | return; | 
|  | 779 | } | 
|  | 780 |  | 
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * The frame is assembled purely out of page frags on the per-EQ napi
 * skb and handed to the GRO engine (plain or vlan-accelerated).
 */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_eq_obj *eq_obj =  &adapter->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	remaining = pkt_size;
	/* j starts at -1 (wraps in u16); first iteration bumps it to 0 */
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
					page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			/* Same page as previous frag; drop the extra ref
			 * taken when the frag was posted */
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	/* GRO path is only taken for frames the HW checksummed OK */
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);

		/* NOTE(review): this early return skips be_rx_stats_update
		 * (unlike the non-vlan path) and leaves the napi frags skb
		 * populated but unsubmitted -- confirm this is intended */
		if (!adapter->vlan_grp || adapter->num_vlans == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd);
	return;
}
|  | 849 |  | 
|  | 850 | static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) | 
|  | 851 | { | 
|  | 852 | struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq); | 
|  | 853 |  | 
|  | 854 | if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0) | 
|  | 855 | return NULL; | 
|  | 856 |  | 
|  | 857 | be_dws_le_to_cpu(rxcp, sizeof(*rxcp)); | 
|  | 858 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 859 | queue_tail_inc(&adapter->rx_obj.cq); | 
|  | 860 | return rxcp; | 
|  | 861 | } | 
|  | 862 |  | 
/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	/* Zero the 32-bit word that contains the 'valid' bit */
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}
|  | 871 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 872 | static inline struct page *be_alloc_pages(u32 size) | 
|  | 873 | { | 
|  | 874 | gfp_t alloc_flags = GFP_ATOMIC; | 
|  | 875 | u32 order = get_order(size); | 
|  | 876 | if (order > 0) | 
|  | 877 | alloc_flags |= __GFP_COMP; | 
|  | 878 | return  alloc_pages(alloc_flags, order); | 
|  | 879 | } | 
|  | 880 |  | 
|  | 881 | /* | 
|  | 882 | * Allocate a page, split it to fragments of size rx_frag_size and post as | 
|  | 883 | * receive buffers to BE | 
|  | 884 | */ | 
|  | 885 | static void be_post_rx_frags(struct be_adapter *adapter) | 
|  | 886 | { | 
|  | 887 | struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; | 
|  | 888 | struct be_rx_page_info *page_info = NULL; | 
|  | 889 | struct be_queue_info *rxq = &adapter->rx_obj.q; | 
|  | 890 | struct page *pagep = NULL; | 
|  | 891 | struct be_eth_rx_d *rxd; | 
|  | 892 | u64 page_dmaaddr = 0, frag_dmaaddr; | 
|  | 893 | u32 posted, page_offset = 0; | 
|  | 894 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 895 | page_info = &page_info_tbl[rxq->head]; | 
|  | 896 | for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { | 
|  | 897 | if (!pagep) { | 
|  | 898 | pagep = be_alloc_pages(adapter->big_page_size); | 
|  | 899 | if (unlikely(!pagep)) { | 
|  | 900 | drvr_stats(adapter)->be_ethrx_post_fail++; | 
|  | 901 | break; | 
|  | 902 | } | 
|  | 903 | page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0, | 
|  | 904 | adapter->big_page_size, | 
|  | 905 | PCI_DMA_FROMDEVICE); | 
|  | 906 | page_info->page_offset = 0; | 
|  | 907 | } else { | 
|  | 908 | get_page(pagep); | 
|  | 909 | page_info->page_offset = page_offset + rx_frag_size; | 
|  | 910 | } | 
|  | 911 | page_offset = page_info->page_offset; | 
|  | 912 | page_info->page = pagep; | 
|  | 913 | pci_unmap_addr_set(page_info, bus, page_dmaaddr); | 
|  | 914 | frag_dmaaddr = page_dmaaddr + page_info->page_offset; | 
|  | 915 |  | 
|  | 916 | rxd = queue_head_node(rxq); | 
|  | 917 | rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); | 
|  | 918 | rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); | 
|  | 919 | queue_head_inc(rxq); | 
|  | 920 |  | 
|  | 921 | /* Any space left in the current big page for another frag? */ | 
|  | 922 | if ((page_offset + rx_frag_size + rx_frag_size) > | 
|  | 923 | adapter->big_page_size) { | 
|  | 924 | pagep = NULL; | 
|  | 925 | page_info->last_page_user = true; | 
|  | 926 | } | 
|  | 927 | page_info = &page_info_tbl[rxq->head]; | 
|  | 928 | } | 
|  | 929 | if (pagep) | 
|  | 930 | page_info->last_page_user = true; | 
|  | 931 |  | 
|  | 932 | if (posted) { | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 933 | atomic_add(posted, &rxq->used); | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 934 | be_rxq_notify(adapter, rxq->id, posted); | 
| Sathya Perla | ea1dae1 | 2009-03-19 23:56:20 -0700 | [diff] [blame] | 935 | } else if (atomic_read(&rxq->used) == 0) { | 
|  | 936 | /* Let be_worker replenish when memory is available */ | 
|  | 937 | adapter->rx_post_starved = true; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 938 | } | 
|  | 939 |  | 
|  | 940 | return; | 
|  | 941 | } | 
|  | 942 |  | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 943 | static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 944 | { | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 945 | struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); | 
|  | 946 |  | 
|  | 947 | if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) | 
|  | 948 | return NULL; | 
|  | 949 |  | 
|  | 950 | be_dws_le_to_cpu(txcp, sizeof(*txcp)); | 
|  | 951 |  | 
|  | 952 | txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; | 
|  | 953 |  | 
|  | 954 | queue_tail_inc(tx_cq); | 
|  | 955 | return txcp; | 
|  | 956 | } | 
|  | 957 |  | 
/*
 * Reclaim the TX WRBs of one transmitted packet whose last WRB sits at
 * last_index: unmap each fragment's DMA mapping, walk the txq tail past
 * the whole packet, credit the freed WRBs back to the queue and free
 * the skb.
 */
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u64 busaddr;
	u16 cur_index, num_wrbs = 0;

	/* The skb was recorded at the index of the packet's first WRB */
	cur_index = txq->tail;
	sent_skb = sent_skbs[cur_index];
	BUG_ON(!sent_skb);
	sent_skbs[cur_index] = NULL;

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		be_dws_le_to_cpu(wrb, sizeof(*wrb));
		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
		if (busaddr != 0) {
			/* busaddr == 0 marks a WRB with nothing mapped
			 * (presumably a header/dummy WRB -- see the tx
			 * submit path), so there is nothing to unmap */
			pci_unmap_single(adapter->pdev, busaddr,
				wrb->frag_len, PCI_DMA_TODEVICE);
		}
		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}
|  | 989 |  | 
| Sathya Perla | 859b1e4 | 2009-08-10 03:43:51 +0000 | [diff] [blame] | 990 | static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj) | 
|  | 991 | { | 
|  | 992 | struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q); | 
|  | 993 |  | 
|  | 994 | if (!eqe->evt) | 
|  | 995 | return NULL; | 
|  | 996 |  | 
|  | 997 | eqe->evt = le32_to_cpu(eqe->evt); | 
|  | 998 | queue_tail_inc(&eq_obj->q); | 
|  | 999 | return eqe; | 
|  | 1000 | } | 
|  | 1001 |  | 
|  | 1002 | static int event_handle(struct be_adapter *adapter, | 
|  | 1003 | struct be_eq_obj *eq_obj) | 
|  | 1004 | { | 
|  | 1005 | struct be_eq_entry *eqe; | 
|  | 1006 | u16 num = 0; | 
|  | 1007 |  | 
|  | 1008 | while ((eqe = event_get(eq_obj)) != NULL) { | 
|  | 1009 | eqe->evt = 0; | 
|  | 1010 | num++; | 
|  | 1011 | } | 
|  | 1012 |  | 
|  | 1013 | /* Deal with any spurious interrupts that come | 
|  | 1014 | * without events | 
|  | 1015 | */ | 
|  | 1016 | be_eq_notify(adapter, eq_obj->q.id, true, true, num); | 
|  | 1017 | if (num) | 
|  | 1018 | napi_schedule(&eq_obj->napi); | 
|  | 1019 |  | 
|  | 1020 | return num; | 
|  | 1021 | } | 
|  | 1022 |  | 
|  | 1023 | /* Just read and notify events without processing them. | 
|  | 1024 | * Used at the time of destroying event queues */ | 
|  | 1025 | static void be_eq_clean(struct be_adapter *adapter, | 
|  | 1026 | struct be_eq_obj *eq_obj) | 
|  | 1027 | { | 
|  | 1028 | struct be_eq_entry *eqe; | 
|  | 1029 | u16 num = 0; | 
|  | 1030 |  | 
|  | 1031 | while ((eqe = event_get(eq_obj)) != NULL) { | 
|  | 1032 | eqe->evt = 0; | 
|  | 1033 | num++; | 
|  | 1034 | } | 
|  | 1035 |  | 
|  | 1036 | if (num) | 
|  | 1037 | be_eq_notify(adapter, eq_obj->q.id, false, true, num); | 
|  | 1038 | } | 
|  | 1039 |  | 
/*
 * Flush the RX path at teardown: discard every pending rx completion
 * (acking each on the cq), then release all posted-but-unused rx
 * buffers still sitting in the rx queue.
 */
static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* The oldest posted buffer sits 'used' entries behind head;
	 * get_rx_page_info() decrements rxq->used, so the loop ends
	 * exactly when every posted buffer has been reclaimed. */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}
|  | 1064 |  | 
| Sathya Perla | a8e9179 | 2009-08-10 03:42:43 +0000 | [diff] [blame] | 1065 | static void be_tx_compl_clean(struct be_adapter *adapter) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1066 | { | 
| Sathya Perla | a8e9179 | 2009-08-10 03:42:43 +0000 | [diff] [blame] | 1067 | struct be_queue_info *tx_cq = &adapter->tx_obj.cq; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1068 | struct be_queue_info *txq = &adapter->tx_obj.q; | 
| Sathya Perla | a8e9179 | 2009-08-10 03:42:43 +0000 | [diff] [blame] | 1069 | struct be_eth_tx_compl *txcp; | 
|  | 1070 | u16 end_idx, cmpl = 0, timeo = 0; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1071 |  | 
| Sathya Perla | a8e9179 | 2009-08-10 03:42:43 +0000 | [diff] [blame] | 1072 | /* Wait for a max of 200ms for all the tx-completions to arrive. */ | 
|  | 1073 | do { | 
|  | 1074 | while ((txcp = be_tx_compl_get(tx_cq))) { | 
|  | 1075 | end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, | 
|  | 1076 | wrb_index, txcp); | 
|  | 1077 | be_tx_compl_process(adapter, end_idx); | 
|  | 1078 | cmpl++; | 
|  | 1079 | } | 
|  | 1080 | if (cmpl) { | 
|  | 1081 | be_cq_notify(adapter, tx_cq->id, false, cmpl); | 
|  | 1082 | cmpl = 0; | 
|  | 1083 | } | 
|  | 1084 |  | 
|  | 1085 | if (atomic_read(&txq->used) == 0 || ++timeo > 200) | 
|  | 1086 | break; | 
|  | 1087 |  | 
|  | 1088 | mdelay(1); | 
|  | 1089 | } while (true); | 
|  | 1090 |  | 
|  | 1091 | if (atomic_read(&txq->used)) | 
|  | 1092 | dev_err(&adapter->pdev->dev, "%d pending tx-completions\n", | 
|  | 1093 | atomic_read(&txq->used)); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1094 | } | 
|  | 1095 |  | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1096 | static void be_mcc_queues_destroy(struct be_adapter *adapter) | 
|  | 1097 | { | 
|  | 1098 | struct be_queue_info *q; | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1099 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1100 | q = &adapter->mcc_obj.q; | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1101 | if (q->created) | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1102 | be_cmd_q_destroy(adapter, q, QTYPE_MCCQ); | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1103 | be_queue_free(adapter, q); | 
|  | 1104 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1105 | q = &adapter->mcc_obj.cq; | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1106 | if (q->created) | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1107 | be_cmd_q_destroy(adapter, q, QTYPE_CQ); | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1108 | be_queue_free(adapter, q); | 
|  | 1109 | } | 
|  | 1110 |  | 
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Allocates and creates the MCC queue and its completion queue;
 * returns 0 on success, -1 on failure after unwinding via the goto
 * chain below (reverse order of the setup steps).
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: each label undoes one successful step above */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
|  | 1146 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1147 | static void be_tx_queues_destroy(struct be_adapter *adapter) | 
|  | 1148 | { | 
|  | 1149 | struct be_queue_info *q; | 
|  | 1150 |  | 
|  | 1151 | q = &adapter->tx_obj.q; | 
| Sathya Perla | a8e9179 | 2009-08-10 03:42:43 +0000 | [diff] [blame] | 1152 | if (q->created) | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1153 | be_cmd_q_destroy(adapter, q, QTYPE_TXQ); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1154 | be_queue_free(adapter, q); | 
|  | 1155 |  | 
|  | 1156 | q = &adapter->tx_obj.cq; | 
|  | 1157 | if (q->created) | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1158 | be_cmd_q_destroy(adapter, q, QTYPE_CQ); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1159 | be_queue_free(adapter, q); | 
|  | 1160 |  | 
| Sathya Perla | 859b1e4 | 2009-08-10 03:43:51 +0000 | [diff] [blame] | 1161 | /* Clear any residual events */ | 
|  | 1162 | be_eq_clean(adapter, &adapter->tx_eq); | 
|  | 1163 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1164 | q = &adapter->tx_eq.q; | 
|  | 1165 | if (q->created) | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1166 | be_cmd_q_destroy(adapter, q, QTYPE_EQ); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1167 | be_queue_free(adapter, q); | 
|  | 1168 | } | 
|  | 1169 |  | 
|  | 1170 | static int be_tx_queues_create(struct be_adapter *adapter) | 
|  | 1171 | { | 
|  | 1172 | struct be_queue_info *eq, *q, *cq; | 
|  | 1173 |  | 
|  | 1174 | adapter->tx_eq.max_eqd = 0; | 
|  | 1175 | adapter->tx_eq.min_eqd = 0; | 
|  | 1176 | adapter->tx_eq.cur_eqd = 96; | 
|  | 1177 | adapter->tx_eq.enable_aic = false; | 
|  | 1178 | /* Alloc Tx Event queue */ | 
|  | 1179 | eq = &adapter->tx_eq.q; | 
|  | 1180 | if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry))) | 
|  | 1181 | return -1; | 
|  | 1182 |  | 
|  | 1183 | /* Ask BE to create Tx Event queue */ | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1184 | if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd)) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1185 | goto tx_eq_free; | 
|  | 1186 | /* Alloc TX eth compl queue */ | 
|  | 1187 | cq = &adapter->tx_obj.cq; | 
|  | 1188 | if (be_queue_alloc(adapter, cq, TX_CQ_LEN, | 
|  | 1189 | sizeof(struct be_eth_tx_compl))) | 
|  | 1190 | goto tx_eq_destroy; | 
|  | 1191 |  | 
|  | 1192 | /* Ask BE to create Tx eth compl queue */ | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1193 | if (be_cmd_cq_create(adapter, cq, eq, false, false, 3)) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1194 | goto tx_cq_free; | 
|  | 1195 |  | 
|  | 1196 | /* Alloc TX eth queue */ | 
|  | 1197 | q = &adapter->tx_obj.q; | 
|  | 1198 | if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb))) | 
|  | 1199 | goto tx_cq_destroy; | 
|  | 1200 |  | 
|  | 1201 | /* Ask BE to create Tx eth queue */ | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1202 | if (be_cmd_txq_create(adapter, q, cq)) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1203 | goto tx_q_free; | 
|  | 1204 | return 0; | 
|  | 1205 |  | 
|  | 1206 | tx_q_free: | 
|  | 1207 | be_queue_free(adapter, q); | 
|  | 1208 | tx_cq_destroy: | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1209 | be_cmd_q_destroy(adapter, cq, QTYPE_CQ); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1210 | tx_cq_free: | 
|  | 1211 | be_queue_free(adapter, cq); | 
|  | 1212 | tx_eq_destroy: | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1213 | be_cmd_q_destroy(adapter, eq, QTYPE_EQ); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1214 | tx_eq_free: | 
|  | 1215 | be_queue_free(adapter, eq); | 
|  | 1216 | return -1; | 
|  | 1217 | } | 
|  | 1218 |  | 
|  | 1219 | static void be_rx_queues_destroy(struct be_adapter *adapter) | 
|  | 1220 | { | 
|  | 1221 | struct be_queue_info *q; | 
|  | 1222 |  | 
|  | 1223 | q = &adapter->rx_obj.q; | 
|  | 1224 | if (q->created) { | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1225 | be_cmd_q_destroy(adapter, q, QTYPE_RXQ); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1226 | be_rx_q_clean(adapter); | 
|  | 1227 | } | 
|  | 1228 | be_queue_free(adapter, q); | 
|  | 1229 |  | 
|  | 1230 | q = &adapter->rx_obj.cq; | 
|  | 1231 | if (q->created) | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1232 | be_cmd_q_destroy(adapter, q, QTYPE_CQ); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1233 | be_queue_free(adapter, q); | 
|  | 1234 |  | 
| Sathya Perla | 859b1e4 | 2009-08-10 03:43:51 +0000 | [diff] [blame] | 1235 | /* Clear any residual events */ | 
|  | 1236 | be_eq_clean(adapter, &adapter->rx_eq); | 
|  | 1237 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1238 | q = &adapter->rx_eq.q; | 
|  | 1239 | if (q->created) | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1240 | be_cmd_q_destroy(adapter, q, QTYPE_EQ); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1241 | be_queue_free(adapter, q); | 
|  | 1242 | } | 
|  | 1243 |  | 
|  | 1244 | static int be_rx_queues_create(struct be_adapter *adapter) | 
|  | 1245 | { | 
|  | 1246 | struct be_queue_info *eq, *q, *cq; | 
|  | 1247 | int rc; | 
|  | 1248 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1249 | adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; | 
|  | 1250 | adapter->rx_eq.max_eqd = BE_MAX_EQD; | 
|  | 1251 | adapter->rx_eq.min_eqd = 0; | 
|  | 1252 | adapter->rx_eq.cur_eqd = 0; | 
|  | 1253 | adapter->rx_eq.enable_aic = true; | 
|  | 1254 |  | 
|  | 1255 | /* Alloc Rx Event queue */ | 
|  | 1256 | eq = &adapter->rx_eq.q; | 
|  | 1257 | rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, | 
|  | 1258 | sizeof(struct be_eq_entry)); | 
|  | 1259 | if (rc) | 
|  | 1260 | return rc; | 
|  | 1261 |  | 
|  | 1262 | /* Ask BE to create Rx Event queue */ | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1263 | rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1264 | if (rc) | 
|  | 1265 | goto rx_eq_free; | 
|  | 1266 |  | 
|  | 1267 | /* Alloc RX eth compl queue */ | 
|  | 1268 | cq = &adapter->rx_obj.cq; | 
|  | 1269 | rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, | 
|  | 1270 | sizeof(struct be_eth_rx_compl)); | 
|  | 1271 | if (rc) | 
|  | 1272 | goto rx_eq_destroy; | 
|  | 1273 |  | 
|  | 1274 | /* Ask BE to create Rx eth compl queue */ | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1275 | rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1276 | if (rc) | 
|  | 1277 | goto rx_cq_free; | 
|  | 1278 |  | 
|  | 1279 | /* Alloc RX eth queue */ | 
|  | 1280 | q = &adapter->rx_obj.q; | 
|  | 1281 | rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d)); | 
|  | 1282 | if (rc) | 
|  | 1283 | goto rx_cq_destroy; | 
|  | 1284 |  | 
|  | 1285 | /* Ask BE to create Rx eth queue */ | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1286 | rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size, | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1287 | BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false); | 
|  | 1288 | if (rc) | 
|  | 1289 | goto rx_q_free; | 
|  | 1290 |  | 
|  | 1291 | return 0; | 
|  | 1292 | rx_q_free: | 
|  | 1293 | be_queue_free(adapter, q); | 
|  | 1294 | rx_cq_destroy: | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1295 | be_cmd_q_destroy(adapter, cq, QTYPE_CQ); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1296 | rx_cq_free: | 
|  | 1297 | be_queue_free(adapter, cq); | 
|  | 1298 | rx_eq_destroy: | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1299 | be_cmd_q_destroy(adapter, eq, QTYPE_EQ); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1300 | rx_eq_free: | 
|  | 1301 | be_queue_free(adapter, eq); | 
|  | 1302 | return rc; | 
|  | 1303 | } | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1304 |  | 
| Sathya Perla | b628bde | 2009-08-17 00:58:26 +0000 | [diff] [blame] | 1305 | /* There are 8 evt ids per func. Retruns the evt id's bit number */ | 
|  | 1306 | static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id) | 
|  | 1307 | { | 
|  | 1308 | return eq_id - 8 * be_pci_func(adapter); | 
|  | 1309 | } | 
|  | 1310 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1311 | static irqreturn_t be_intx(int irq, void *dev) | 
|  | 1312 | { | 
|  | 1313 | struct be_adapter *adapter = dev; | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1314 | int isr; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1315 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1316 | isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + | 
| Sathya Perla | eec368f | 2009-07-27 22:52:23 +0000 | [diff] [blame] | 1317 | be_pci_func(adapter) * CEV_ISR_SIZE); | 
| Sathya Perla | c001c21 | 2009-07-01 01:06:07 +0000 | [diff] [blame] | 1318 | if (!isr) | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1319 | return IRQ_NONE; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1320 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1321 | event_handle(adapter, &adapter->tx_eq); | 
|  | 1322 | event_handle(adapter, &adapter->rx_eq); | 
| Sathya Perla | c001c21 | 2009-07-01 01:06:07 +0000 | [diff] [blame] | 1323 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1324 | return IRQ_HANDLED; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1325 | } | 
|  | 1326 |  | 
/* MSI-X handler for the rx event queue's vector */
static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}
|  | 1335 |  | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1336 | static irqreturn_t be_msix_tx_mcc(int irq, void *dev) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1337 | { | 
|  | 1338 | struct be_adapter *adapter = dev; | 
|  | 1339 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1340 | event_handle(adapter, &adapter->tx_eq); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1341 |  | 
|  | 1342 | return IRQ_HANDLED; | 
|  | 1343 | } | 
|  | 1344 |  | 
| Ajit Khaparde | 5be93b9 | 2009-07-21 12:36:19 -0700 | [diff] [blame] | 1345 | static inline bool do_gro(struct be_adapter *adapter, | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1346 | struct be_eth_rx_compl *rxcp) | 
|  | 1347 | { | 
|  | 1348 | int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp); | 
|  | 1349 | int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); | 
|  | 1350 |  | 
|  | 1351 | if (err) | 
|  | 1352 | drvr_stats(adapter)->be_rxcp_err++; | 
|  | 1353 |  | 
| Ajit Khaparde | 5be93b9 | 2009-07-21 12:36:19 -0700 | [diff] [blame] | 1354 | return (tcp_frame && !err) ? true : false; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1355 | } | 
|  | 1356 |  | 
/* NAPI poll handler for the rx completion queue.
 * Processes at most @budget completions, refills the rx queue when it
 * drops below the watermark, and re-arms the completion queue only when
 * all pending work was consumed. Returns the number of completions
 * processed, per the NAPI contract.
 */
int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		/* Error-free TCP frames take the GRO path; everything
		 * else goes through the regular receive path.
		 */
		if (do_gro(adapter, rxcp))
			be_rx_compl_process_gro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);

		/* Scrub the compl entry so a stale one is never re-read */
		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
|  | 1393 |  | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1394 | void be_process_tx(struct be_adapter *adapter) | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1395 | { | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1396 | struct be_queue_info *txq = &adapter->tx_obj.q; | 
|  | 1397 | struct be_queue_info *tx_cq = &adapter->tx_obj.cq; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1398 | struct be_eth_tx_compl *txcp; | 
|  | 1399 | u32 num_cmpl = 0; | 
|  | 1400 | u16 end_idx; | 
|  | 1401 |  | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1402 | while ((txcp = be_tx_compl_get(tx_cq))) { | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1403 | end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl, | 
|  | 1404 | wrb_index, txcp); | 
|  | 1405 | be_tx_compl_process(adapter, end_idx); | 
|  | 1406 | num_cmpl++; | 
|  | 1407 | } | 
|  | 1408 |  | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1409 | if (num_cmpl) { | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1410 | be_cq_notify(adapter, tx_cq->id, true, num_cmpl); | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1411 |  | 
|  | 1412 | /* As Tx wrbs have been freed up, wake up netdev queue if | 
|  | 1413 | * it was stopped due to lack of tx wrbs. | 
|  | 1414 | */ | 
|  | 1415 | if (netif_queue_stopped(adapter->netdev) && | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1416 | atomic_read(&txq->used) < txq->len / 2) { | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1417 | netif_wake_queue(adapter->netdev); | 
|  | 1418 | } | 
|  | 1419 |  | 
|  | 1420 | drvr_stats(adapter)->be_tx_events++; | 
|  | 1421 | drvr_stats(adapter)->be_tx_compl += num_cmpl; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1422 | } | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1423 | } | 
|  | 1424 |  | 
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);

	/* Complete napi up front: everything pending is consumed below
	 * regardless of budget, so there is never anything to re-poll.
	 */
	napi_complete(napi);

	be_process_tx(adapter);

	be_process_mcc(adapter);

	/* Always report one unit of work done */
	return 1;
}
|  | 1442 |  | 
| Sathya Perla | ea1dae1 | 2009-03-19 23:56:20 -0700 | [diff] [blame] | 1443 | static void be_worker(struct work_struct *work) | 
|  | 1444 | { | 
|  | 1445 | struct be_adapter *adapter = | 
|  | 1446 | container_of(work, struct be_adapter, work.work); | 
|  | 1447 | int status; | 
|  | 1448 |  | 
| Sathya Perla | ea1dae1 | 2009-03-19 23:56:20 -0700 | [diff] [blame] | 1449 | /* Get Stats */ | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1450 | status = be_cmd_get_stats(adapter, &adapter->stats.cmd); | 
| Sathya Perla | ea1dae1 | 2009-03-19 23:56:20 -0700 | [diff] [blame] | 1451 | if (!status) | 
|  | 1452 | netdev_stats_update(adapter); | 
|  | 1453 |  | 
|  | 1454 | /* Set EQ delay */ | 
|  | 1455 | be_rx_eqd_update(adapter); | 
|  | 1456 |  | 
| Sathya Perla | 4097f66 | 2009-03-24 16:40:13 -0700 | [diff] [blame] | 1457 | be_tx_rate_update(adapter); | 
|  | 1458 | be_rx_rate_update(adapter); | 
|  | 1459 |  | 
| Sathya Perla | ea1dae1 | 2009-03-19 23:56:20 -0700 | [diff] [blame] | 1460 | if (adapter->rx_post_starved) { | 
|  | 1461 | adapter->rx_post_starved = false; | 
|  | 1462 | be_post_rx_frags(adapter); | 
|  | 1463 | } | 
|  | 1464 |  | 
|  | 1465 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); | 
|  | 1466 | } | 
|  | 1467 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1468 | static void be_msix_enable(struct be_adapter *adapter) | 
|  | 1469 | { | 
|  | 1470 | int i, status; | 
|  | 1471 |  | 
|  | 1472 | for (i = 0; i < BE_NUM_MSIX_VECTORS; i++) | 
|  | 1473 | adapter->msix_entries[i].entry = i; | 
|  | 1474 |  | 
|  | 1475 | status = pci_enable_msix(adapter->pdev, adapter->msix_entries, | 
|  | 1476 | BE_NUM_MSIX_VECTORS); | 
|  | 1477 | if (status == 0) | 
|  | 1478 | adapter->msix_enabled = true; | 
|  | 1479 | return; | 
|  | 1480 | } | 
|  | 1481 |  | 
|  | 1482 | static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id) | 
|  | 1483 | { | 
| Sathya Perla | b628bde | 2009-08-17 00:58:26 +0000 | [diff] [blame] | 1484 | return adapter->msix_entries[ | 
|  | 1485 | be_evt_bit_get(adapter, eq_id)].vector; | 
|  | 1486 | } | 
|  | 1487 |  | 
/* Request the MSI-X irq backing the given event queue object.
 * The irq is named "<netdev>-<desc>" and @adapter is passed as the
 * handler cookie. Returns request_irq()'s status.
 */
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	/* NOTE(review): unbounded sprintf into eq_obj->desc; relies on
	 * netdev->name and desc being short - confirm the buffer size.
	 */
	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}
|  | 1499 |  | 
|  | 1500 | static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj) | 
|  | 1501 | { | 
|  | 1502 | int vec = be_msix_vec_get(adapter, eq_obj->q.id); | 
|  | 1503 | free_irq(vec, adapter); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1504 | } | 
|  | 1505 |  | 
|  | 1506 | static int be_msix_register(struct be_adapter *adapter) | 
|  | 1507 | { | 
| Sathya Perla | b628bde | 2009-08-17 00:58:26 +0000 | [diff] [blame] | 1508 | int status; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1509 |  | 
| Sathya Perla | b628bde | 2009-08-17 00:58:26 +0000 | [diff] [blame] | 1510 | status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx"); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1511 | if (status) | 
|  | 1512 | goto err; | 
|  | 1513 |  | 
| Sathya Perla | b628bde | 2009-08-17 00:58:26 +0000 | [diff] [blame] | 1514 | status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx"); | 
|  | 1515 | if (status) | 
|  | 1516 | goto free_tx_irq; | 
|  | 1517 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1518 | return 0; | 
| Sathya Perla | b628bde | 2009-08-17 00:58:26 +0000 | [diff] [blame] | 1519 |  | 
|  | 1520 | free_tx_irq: | 
|  | 1521 | be_free_irq(adapter, &adapter->tx_eq); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1522 | err: | 
|  | 1523 | dev_warn(&adapter->pdev->dev, | 
|  | 1524 | "MSIX Request IRQ failed - err %d\n", status); | 
|  | 1525 | pci_disable_msix(adapter->pdev); | 
|  | 1526 | adapter->msix_enabled = false; | 
|  | 1527 | return status; | 
|  | 1528 | } | 
|  | 1529 |  | 
|  | 1530 | static int be_irq_register(struct be_adapter *adapter) | 
|  | 1531 | { | 
|  | 1532 | struct net_device *netdev = adapter->netdev; | 
|  | 1533 | int status; | 
|  | 1534 |  | 
|  | 1535 | if (adapter->msix_enabled) { | 
|  | 1536 | status = be_msix_register(adapter); | 
|  | 1537 | if (status == 0) | 
|  | 1538 | goto done; | 
|  | 1539 | } | 
|  | 1540 |  | 
|  | 1541 | /* INTx */ | 
|  | 1542 | netdev->irq = adapter->pdev->irq; | 
|  | 1543 | status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name, | 
|  | 1544 | adapter); | 
|  | 1545 | if (status) { | 
|  | 1546 | dev_err(&adapter->pdev->dev, | 
|  | 1547 | "INTx request IRQ failed - err %d\n", status); | 
|  | 1548 | return status; | 
|  | 1549 | } | 
|  | 1550 | done: | 
|  | 1551 | adapter->isr_registered = true; | 
|  | 1552 | return 0; | 
|  | 1553 | } | 
|  | 1554 |  | 
|  | 1555 | static void be_irq_unregister(struct be_adapter *adapter) | 
|  | 1556 | { | 
|  | 1557 | struct net_device *netdev = adapter->netdev; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1558 |  | 
|  | 1559 | if (!adapter->isr_registered) | 
|  | 1560 | return; | 
|  | 1561 |  | 
|  | 1562 | /* INTx */ | 
|  | 1563 | if (!adapter->msix_enabled) { | 
|  | 1564 | free_irq(netdev->irq, adapter); | 
|  | 1565 | goto done; | 
|  | 1566 | } | 
|  | 1567 |  | 
|  | 1568 | /* MSIx */ | 
| Sathya Perla | b628bde | 2009-08-17 00:58:26 +0000 | [diff] [blame] | 1569 | be_free_irq(adapter, &adapter->tx_eq); | 
|  | 1570 | be_free_irq(adapter, &adapter->rx_eq); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1571 | done: | 
|  | 1572 | adapter->isr_registered = false; | 
|  | 1573 | return; | 
|  | 1574 | } | 
|  | 1575 |  | 
|  | 1576 | static int be_open(struct net_device *netdev) | 
|  | 1577 | { | 
|  | 1578 | struct be_adapter *adapter = netdev_priv(netdev); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1579 | struct be_eq_obj *rx_eq = &adapter->rx_eq; | 
|  | 1580 | struct be_eq_obj *tx_eq = &adapter->tx_eq; | 
| Sathya Perla | a8f447b | 2009-06-18 00:10:27 +0000 | [diff] [blame] | 1581 | bool link_up; | 
|  | 1582 | int status; | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1583 |  | 
|  | 1584 | /* First time posting */ | 
|  | 1585 | be_post_rx_frags(adapter); | 
|  | 1586 |  | 
|  | 1587 | napi_enable(&rx_eq->napi); | 
|  | 1588 | napi_enable(&tx_eq->napi); | 
|  | 1589 |  | 
|  | 1590 | be_irq_register(adapter); | 
|  | 1591 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1592 | be_intr_set(adapter, true); | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1593 |  | 
|  | 1594 | /* The evt queues are created in unarmed state; arm them */ | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1595 | be_eq_notify(adapter, rx_eq->q.id, true, false, 0); | 
|  | 1596 | be_eq_notify(adapter, tx_eq->q.id, true, false, 0); | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1597 |  | 
|  | 1598 | /* Rx compl queue may be in unarmed state; rearm it */ | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1599 | be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0); | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1600 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1601 | status = be_cmd_link_status_query(adapter, &link_up); | 
| Sathya Perla | a8f447b | 2009-06-18 00:10:27 +0000 | [diff] [blame] | 1602 | if (status) | 
|  | 1603 | return status; | 
|  | 1604 | be_link_status_update(adapter, link_up); | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1605 |  | 
|  | 1606 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); | 
|  | 1607 | return 0; | 
|  | 1608 | } | 
|  | 1609 |  | 
/* Bring up the BE interface: create the if object on the card, replay
 * vlan config, program flow control, then create the tx, rx and mcc
 * queue sets. On failure everything already created is destroyed in
 * reverse order via the goto chain. Undone by be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 if_flags;
	int status;

	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
		BE_IF_FLAGS_PASS_L3L4_ERRORS;
	status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr,
			false/* pmac_invalid */, &adapter->if_handle,
			&adapter->pmac_id);
	if (status != 0)
		goto do_none;

	/* Re-program the vlan filter table into the new if object */
	be_vid_config(netdev);

	status = be_cmd_set_flow_control(adapter, true, true);
	if (status != 0)
		goto if_destroy;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	return 0;

	/* Unwind in reverse order of creation */
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}
|  | 1654 |  | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1655 | static int be_clear(struct be_adapter *adapter) | 
|  | 1656 | { | 
| Sathya Perla | 1a8887d | 2009-08-17 00:58:41 +0000 | [diff] [blame] | 1657 | be_mcc_queues_destroy(adapter); | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1658 | be_rx_queues_destroy(adapter); | 
|  | 1659 | be_tx_queues_destroy(adapter); | 
|  | 1660 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1661 | be_cmd_if_destroy(adapter, adapter->if_handle); | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1662 |  | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1663 | return 0; | 
|  | 1664 | } | 
|  | 1665 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1666 | static int be_close(struct net_device *netdev) | 
|  | 1667 | { | 
|  | 1668 | struct be_adapter *adapter = netdev_priv(netdev); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1669 | struct be_eq_obj *rx_eq = &adapter->rx_eq; | 
|  | 1670 | struct be_eq_obj *tx_eq = &adapter->tx_eq; | 
|  | 1671 | int vec; | 
|  | 1672 |  | 
| Sathya Perla | b305be7 | 2009-06-10 02:18:35 +0000 | [diff] [blame] | 1673 | cancel_delayed_work_sync(&adapter->work); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1674 |  | 
|  | 1675 | netif_stop_queue(netdev); | 
|  | 1676 | netif_carrier_off(netdev); | 
| Sathya Perla | a8f447b | 2009-06-18 00:10:27 +0000 | [diff] [blame] | 1677 | adapter->link_up = false; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1678 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1679 | be_intr_set(adapter, false); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1680 |  | 
|  | 1681 | if (adapter->msix_enabled) { | 
|  | 1682 | vec = be_msix_vec_get(adapter, tx_eq->q.id); | 
|  | 1683 | synchronize_irq(vec); | 
|  | 1684 | vec = be_msix_vec_get(adapter, rx_eq->q.id); | 
|  | 1685 | synchronize_irq(vec); | 
|  | 1686 | } else { | 
|  | 1687 | synchronize_irq(netdev->irq); | 
|  | 1688 | } | 
|  | 1689 | be_irq_unregister(adapter); | 
|  | 1690 |  | 
|  | 1691 | napi_disable(&rx_eq->napi); | 
|  | 1692 | napi_disable(&tx_eq->napi); | 
|  | 1693 |  | 
| Sathya Perla | a8e9179 | 2009-08-10 03:42:43 +0000 | [diff] [blame] | 1694 | /* Wait for all pending tx completions to arrive so that | 
|  | 1695 | * all tx skbs are freed. | 
|  | 1696 | */ | 
|  | 1697 | be_tx_compl_clean(adapter); | 
|  | 1698 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1699 | return 0; | 
|  | 1700 | } | 
|  | 1701 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1702 | static struct net_device_ops be_netdev_ops = { | 
|  | 1703 | .ndo_open		= be_open, | 
|  | 1704 | .ndo_stop		= be_close, | 
|  | 1705 | .ndo_start_xmit		= be_xmit, | 
|  | 1706 | .ndo_get_stats		= be_get_stats, | 
|  | 1707 | .ndo_set_rx_mode	= be_set_multicast_list, | 
|  | 1708 | .ndo_set_mac_address	= be_mac_addr_set, | 
|  | 1709 | .ndo_change_mtu		= be_change_mtu, | 
|  | 1710 | .ndo_validate_addr	= eth_validate_addr, | 
|  | 1711 | .ndo_vlan_rx_register	= be_vlan_register, | 
|  | 1712 | .ndo_vlan_rx_add_vid	= be_vlan_add_vid, | 
|  | 1713 | .ndo_vlan_rx_kill_vid	= be_vlan_rem_vid, | 
|  | 1714 | }; | 
|  | 1715 |  | 
/* One-time netdev initialization: advertise offload features, hook up
 * the netdev/ethtool ops and the two napi contexts, and leave the
 * device stopped/carrier-off until be_open() runs.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* SG, vlan offloads, TSO, IPv4/IPv6 checksumming and GRO */
	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_GRO;

	netdev->flags |= IFF_MULTICAST;

	/* Hw rx checksum offload is on by default */
	adapter->rx_csum = true;

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One napi context per event queue */
	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
		BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}
|  | 1740 |  | 
|  | 1741 | static void be_unmap_pci_bars(struct be_adapter *adapter) | 
|  | 1742 | { | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1743 | if (adapter->csr) | 
|  | 1744 | iounmap(adapter->csr); | 
|  | 1745 | if (adapter->db) | 
|  | 1746 | iounmap(adapter->db); | 
|  | 1747 | if (adapter->pcicfg) | 
|  | 1748 | iounmap(adapter->pcicfg); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1749 | } | 
|  | 1750 |  | 
|  | 1751 | static int be_map_pci_bars(struct be_adapter *adapter) | 
|  | 1752 | { | 
|  | 1753 | u8 __iomem *addr; | 
|  | 1754 |  | 
|  | 1755 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), | 
|  | 1756 | pci_resource_len(adapter->pdev, 2)); | 
|  | 1757 | if (addr == NULL) | 
|  | 1758 | return -ENOMEM; | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1759 | adapter->csr = addr; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1760 |  | 
|  | 1761 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4), | 
|  | 1762 | 128 * 1024); | 
|  | 1763 | if (addr == NULL) | 
|  | 1764 | goto pci_map_err; | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1765 | adapter->db = addr; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1766 |  | 
|  | 1767 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1), | 
|  | 1768 | pci_resource_len(adapter->pdev, 1)); | 
|  | 1769 | if (addr == NULL) | 
|  | 1770 | goto pci_map_err; | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1771 | adapter->pcicfg = addr; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1772 |  | 
|  | 1773 | return 0; | 
|  | 1774 | pci_map_err: | 
|  | 1775 | be_unmap_pci_bars(adapter); | 
|  | 1776 | return -ENOMEM; | 
|  | 1777 | } | 
|  | 1778 |  | 
|  | 1779 |  | 
|  | 1780 | static void be_ctrl_cleanup(struct be_adapter *adapter) | 
|  | 1781 | { | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1782 | struct be_dma_mem *mem = &adapter->mbox_mem_alloced; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1783 |  | 
|  | 1784 | be_unmap_pci_bars(adapter); | 
|  | 1785 |  | 
|  | 1786 | if (mem->va) | 
|  | 1787 | pci_free_consistent(adapter->pdev, mem->size, | 
|  | 1788 | mem->va, mem->dma); | 
|  | 1789 | } | 
|  | 1790 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1791 | static int be_ctrl_init(struct be_adapter *adapter) | 
|  | 1792 | { | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1793 | struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced; | 
|  | 1794 | struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1795 | int status; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1796 |  | 
|  | 1797 | status = be_map_pci_bars(adapter); | 
|  | 1798 | if (status) | 
|  | 1799 | return status; | 
|  | 1800 |  | 
|  | 1801 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; | 
|  | 1802 | mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev, | 
|  | 1803 | mbox_mem_alloc->size, &mbox_mem_alloc->dma); | 
|  | 1804 | if (!mbox_mem_alloc->va) { | 
|  | 1805 | be_unmap_pci_bars(adapter); | 
|  | 1806 | return -1; | 
|  | 1807 | } | 
|  | 1808 | mbox_mem_align->size = sizeof(struct be_mcc_mailbox); | 
|  | 1809 | mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); | 
|  | 1810 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); | 
|  | 1811 | memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1812 | spin_lock_init(&adapter->mbox_lock); | 
|  | 1813 | spin_lock_init(&adapter->mcc_lock); | 
|  | 1814 | spin_lock_init(&adapter->mcc_cq_lock); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1815 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1816 | return 0; | 
|  | 1817 | } | 
|  | 1818 |  | 
|  | 1819 | static void be_stats_cleanup(struct be_adapter *adapter) | 
|  | 1820 | { | 
|  | 1821 | struct be_stats_obj *stats = &adapter->stats; | 
|  | 1822 | struct be_dma_mem *cmd = &stats->cmd; | 
|  | 1823 |  | 
|  | 1824 | if (cmd->va) | 
|  | 1825 | pci_free_consistent(adapter->pdev, cmd->size, | 
|  | 1826 | cmd->va, cmd->dma); | 
|  | 1827 | } | 
|  | 1828 |  | 
|  | 1829 | static int be_stats_init(struct be_adapter *adapter) | 
|  | 1830 | { | 
|  | 1831 | struct be_stats_obj *stats = &adapter->stats; | 
|  | 1832 | struct be_dma_mem *cmd = &stats->cmd; | 
|  | 1833 |  | 
|  | 1834 | cmd->size = sizeof(struct be_cmd_req_get_stats); | 
|  | 1835 | cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); | 
|  | 1836 | if (cmd->va == NULL) | 
|  | 1837 | return -1; | 
|  | 1838 | return 0; | 
|  | 1839 | } | 
|  | 1840 |  | 
/*
 * PCI remove callback: tear down the adapter in strict reverse order of
 * be_probe(). The ordering below is load-bearing; do not reorder.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	if (!adapter)
		return;

	/* Stop the stack from using the device before destroying queues */
	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* adapter lives in netdev's private area; this frees both */
	free_netdev(adapter->netdev);
}
|  | 1866 |  | 
|  | 1867 | static int be_hw_up(struct be_adapter *adapter) | 
|  | 1868 | { | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1869 | int status; | 
|  | 1870 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1871 | status = be_cmd_POST(adapter); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1872 | if (status) | 
|  | 1873 | return status; | 
|  | 1874 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1875 | status = be_cmd_get_fw_ver(adapter, adapter->fw_ver); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1876 | if (status) | 
|  | 1877 | return status; | 
|  | 1878 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1879 | status = be_cmd_query_fw_cfg(adapter, &adapter->port_num); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1880 | return status; | 
|  | 1881 | } | 
|  | 1882 |  | 
|  | 1883 | static int __devinit be_probe(struct pci_dev *pdev, | 
|  | 1884 | const struct pci_device_id *pdev_id) | 
|  | 1885 | { | 
|  | 1886 | int status = 0; | 
|  | 1887 | struct be_adapter *adapter; | 
|  | 1888 | struct net_device *netdev; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1889 | u8 mac[ETH_ALEN]; | 
|  | 1890 |  | 
|  | 1891 | status = pci_enable_device(pdev); | 
|  | 1892 | if (status) | 
|  | 1893 | goto do_none; | 
|  | 1894 |  | 
|  | 1895 | status = pci_request_regions(pdev, DRV_NAME); | 
|  | 1896 | if (status) | 
|  | 1897 | goto disable_dev; | 
|  | 1898 | pci_set_master(pdev); | 
|  | 1899 |  | 
|  | 1900 | netdev = alloc_etherdev(sizeof(struct be_adapter)); | 
|  | 1901 | if (netdev == NULL) { | 
|  | 1902 | status = -ENOMEM; | 
|  | 1903 | goto rel_reg; | 
|  | 1904 | } | 
|  | 1905 | adapter = netdev_priv(netdev); | 
|  | 1906 | adapter->pdev = pdev; | 
|  | 1907 | pci_set_drvdata(pdev, adapter); | 
|  | 1908 | adapter->netdev = netdev; | 
|  | 1909 |  | 
|  | 1910 | be_msix_enable(adapter); | 
|  | 1911 |  | 
| Yang Hongyang | e930438 | 2009-04-13 14:40:14 -0700 | [diff] [blame] | 1912 | status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1913 | if (!status) { | 
|  | 1914 | netdev->features |= NETIF_F_HIGHDMA; | 
|  | 1915 | } else { | 
| Yang Hongyang | e930438 | 2009-04-13 14:40:14 -0700 | [diff] [blame] | 1916 | status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1917 | if (status) { | 
|  | 1918 | dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); | 
|  | 1919 | goto free_netdev; | 
|  | 1920 | } | 
|  | 1921 | } | 
|  | 1922 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1923 | status = be_ctrl_init(adapter); | 
|  | 1924 | if (status) | 
|  | 1925 | goto free_netdev; | 
|  | 1926 |  | 
| sarveshwarb | 14074ea | 2009-08-05 13:05:24 -0700 | [diff] [blame] | 1927 | status = be_cmd_reset_function(adapter); | 
|  | 1928 | if (status) | 
|  | 1929 | goto ctrl_clean; | 
|  | 1930 |  | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1931 | status = be_stats_init(adapter); | 
|  | 1932 | if (status) | 
|  | 1933 | goto ctrl_clean; | 
|  | 1934 |  | 
|  | 1935 | status = be_hw_up(adapter); | 
|  | 1936 | if (status) | 
|  | 1937 | goto stats_clean; | 
|  | 1938 |  | 
| Sathya Perla | 8788fdc | 2009-07-27 22:52:03 +0000 | [diff] [blame] | 1939 | status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK, | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1940 | true /* permanent */, 0); | 
|  | 1941 | if (status) | 
|  | 1942 | goto stats_clean; | 
|  | 1943 | memcpy(netdev->dev_addr, mac, ETH_ALEN); | 
|  | 1944 |  | 
|  | 1945 | INIT_DELAYED_WORK(&adapter->work, be_worker); | 
|  | 1946 | be_netdev_init(netdev); | 
|  | 1947 | SET_NETDEV_DEV(netdev, &adapter->pdev->dev); | 
|  | 1948 |  | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1949 | status = be_setup(adapter); | 
|  | 1950 | if (status) | 
|  | 1951 | goto stats_clean; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1952 | status = register_netdev(netdev); | 
|  | 1953 | if (status != 0) | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1954 | goto unsetup; | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1955 |  | 
| Ajit Khaparde | c4ca237 | 2009-05-18 15:38:55 -0700 | [diff] [blame] | 1956 | dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1957 | return 0; | 
|  | 1958 |  | 
| Sathya Perla | 5fb379e | 2009-06-18 00:02:59 +0000 | [diff] [blame] | 1959 | unsetup: | 
|  | 1960 | be_clear(adapter); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1961 | stats_clean: | 
|  | 1962 | be_stats_cleanup(adapter); | 
|  | 1963 | ctrl_clean: | 
|  | 1964 | be_ctrl_cleanup(adapter); | 
|  | 1965 | free_netdev: | 
|  | 1966 | free_netdev(adapter->netdev); | 
|  | 1967 | rel_reg: | 
|  | 1968 | pci_release_regions(pdev); | 
|  | 1969 | disable_dev: | 
|  | 1970 | pci_disable_device(pdev); | 
|  | 1971 | do_none: | 
| Ajit Khaparde | c4ca237 | 2009-05-18 15:38:55 -0700 | [diff] [blame] | 1972 | dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev)); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 1973 | return status; | 
|  | 1974 | } | 
|  | 1975 |  | 
/*
 * Legacy PM suspend callback: quiesce the interface, free the hardware
 * rings/queues and put the device into the requested low-power state.
 * The ordering (detach -> close -> clear -> PCI power-down) is
 * load-bearing; be_resume() reverses it.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() expects RTNL, as in the normal ndo path */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
|  | 1994 |  | 
|  | 1995 | static int be_resume(struct pci_dev *pdev) | 
|  | 1996 | { | 
|  | 1997 | int status = 0; | 
|  | 1998 | struct be_adapter *adapter = pci_get_drvdata(pdev); | 
|  | 1999 | struct net_device *netdev =  adapter->netdev; | 
|  | 2000 |  | 
|  | 2001 | netif_device_detach(netdev); | 
|  | 2002 |  | 
|  | 2003 | status = pci_enable_device(pdev); | 
|  | 2004 | if (status) | 
|  | 2005 | return status; | 
|  | 2006 |  | 
|  | 2007 | pci_set_power_state(pdev, 0); | 
|  | 2008 | pci_restore_state(pdev); | 
|  | 2009 |  | 
| Sarveshwar Bandi | 9b0365f | 2009-08-12 21:01:29 +0000 | [diff] [blame] | 2010 | be_setup(adapter); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 2011 | if (netif_running(netdev)) { | 
|  | 2012 | rtnl_lock(); | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 2013 | be_open(netdev); | 
|  | 2014 | rtnl_unlock(); | 
|  | 2015 | } | 
|  | 2016 | netif_device_attach(netdev); | 
|  | 2017 | return 0; | 
|  | 2018 | } | 
|  | 2019 |  | 
/* PCI driver descriptor; legacy .suspend/.resume PM callbacks are used. */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume
};
|  | 2028 |  | 
|  | 2029 | static int __init be_init_module(void) | 
|  | 2030 | { | 
|  | 2031 | if (rx_frag_size != 8192 && rx_frag_size != 4096 | 
|  | 2032 | && rx_frag_size != 2048) { | 
|  | 2033 | printk(KERN_WARNING DRV_NAME | 
|  | 2034 | " : Module param rx_frag_size must be 2048/4096/8192." | 
|  | 2035 | " Using 2048\n"); | 
|  | 2036 | rx_frag_size = 2048; | 
|  | 2037 | } | 
| Sathya Perla | 6b7c5b9 | 2009-03-11 23:32:03 -0700 | [diff] [blame] | 2038 |  | 
|  | 2039 | return pci_register_driver(&be_driver); | 
|  | 2040 | } | 
|  | 2041 | module_init(be_init_module); | 
|  | 2042 |  | 
/* Module exit point: unregister the PCI driver, detaching all devices. */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
|  | 2047 | module_exit(be_exit_module); |