/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
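
/*
 * Usage sketch (assuming the driver is built and loaded as the "bna"
 * module):
 *
 *   modprobe bna bnad_msix_disable=1 bnad_ioc_auto_recover=0
 *
 * Both parameters are declared with 0444 permissions, so they are
 * visible but read-only under /sys/module/bna/parameters/ at runtime.
 */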

/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) :	\
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)
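
/*
 * Sizing note: struct bnad_unmap_q already declares one bnad_skb_unmap
 * element as its trailing array, so a queue of _depth entries needs the
 * header plus (_depth - 1) extra elements, hence the arithmetic above.
 * A minimal usage sketch, with the resource-enum index assumed from the
 * Tx resource list:
 *
 *   BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
 *			      bnad->num_txq_per_tx, BNAD_TX_UNMAPQ_DEPTH);
 */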

#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
						next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}
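
/*
 * Note: the walk above crosses CQ page boundaries through the software
 * queue page table (BNA_CQ_QPGE_PTR_GET) and clears cmpl->valid on every
 * entry, so completions left over from before the Rx teardown cannot be
 * mistaken for fresh ones once the Rx path is restarted.
 */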

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff		*skb = NULL;
	int			i;

	unmap_array = unmap_q->unmap_array;

	unmap_cons = 0;
	while (unmap_cons < unmap_q->q_depth) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb) {
			unmap_cons++;
			continue;
		}
		unmap_array[unmap_cons].skb = NULL;

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
						DMA_TO_DEVICE);

		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		if (++unmap_cons >= unmap_q->q_depth)
			break;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			if (++unmap_cons >= unmap_q->q_depth)
				break;
		}
		dev_kfree_skb_any(skb);
	}
}
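
/*
 * Layout note: each queued skb occupies one unmap slot for its linear
 * (skb_headlen) mapping plus one slot per page fragment, which is why
 * unmap_cons advances once for the head and once per frag before the
 * skb itself is freed.
 */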

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 *		    c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		sent_packets = 0, sent_bytes = 0;
	u16		wis, unmap_cons, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff		*skb;
	int i;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs from a tasklet that was
	 * scheduled before bnad_cb_tx_cleanup() cleared the
	 * BNAD_TXQ_TX_STARTED bit, but actually executes after
	 * the cleanup has run.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				  updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		unmap_array[unmap_cons].skb = NULL;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);

		prefetch(&unmap_array[unmap_cons + 1]);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			prefetch(&unmap_array[unmap_cons + 1]);

			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
		}
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}
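
/*
 * Accounting sketch: the hardware publishes its consumer index through
 * *tcb->hw_consumer_index; BNA_Q_INDEX_CHANGE() turns the distance from
 * the driver's consumer_index into a count of completed work items, and
 * BNA_TXQ_WI_NEEDED(1 + nr_frags) converts each reclaimed skb back into
 * the number of work items it consumed when it was posted.
 */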

/*
 * Tx free tasklet: frees Tx buffers for all the tcbs across all the
 * Tx objects. Scheduled from the sending context, so that the fat Tx
 * lock is not held for too long there.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32		acked = 0;
	int			i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						  &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
						BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* TODO */
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}
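
/*
 * BNAD_TXQ_FREE_SENT acts as a lock-free mutex between this tasklet,
 * bnad_tx() and the sending context: whoever wins the
 * test_and_set_bit() reclaims buffers, everyone else backs off.
 * smp_mb__before_clear_bit() orders the reclaim stores before the bit
 * release, so the next winner sees a consistent unmap queue.
 */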

static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}

static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}

static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range) {
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		}
		skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
				     GFP_ATOMIC);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			goto finishing;
		}
		skb->dev = bnad->netdev;
		skb_reserve(skb, NET_IP_ALIGN);
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}
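
/*
 * Note: rcb->producer_index and unmap_q->producer_index advance in lock
 * step (one skb per RxQ entry), and the smp_mb() ensures the descriptor
 * and unmap-array stores are visible before the doorbell write hands
 * the new producer index to the hardware.
 */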
 | 420 |  | 
| Rasesh Mody | 8b230ed | 2010-08-23 20:24:12 -0700 | [diff] [blame] | 421 | static inline void | 
 | 422 | bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb) | 
 | 423 | { | 
 | 424 | 	struct bnad_unmap_q *unmap_q = rcb->unmap_q; | 
 | 425 |  | 
 | 426 | 	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { | 
 | 427 | 		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) | 
 | 428 | 			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) | 
 | 429 | 			bnad_alloc_n_post_rxbufs(bnad, rcb); | 
 | 430 | 		smp_mb__before_clear_bit(); | 
 | 431 | 		clear_bit(BNAD_RXQ_REFILL, &rcb->flags); | 
 | 432 | 	} | 
 | 433 | } | 
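
/*
 * The right shift implements a cheap threshold test: the queue is only
 * refilled once at least 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT entries are
 * free, batching the GFP_ATOMIC allocations instead of topping up one
 * buffer at a time from the receive path.
 */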

static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	u32 flags, unmap_cons;
	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
		return 0;

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (qid0 == cmpl->rxq_id)
			rcb = ccb->rcb[0];
		else
			rcb = ccb->rcb[1];

		unmap_q = rcb->unmap_q;
		unmap_array = unmap_q->unmap_array;
		unmap_cons = unmap_q->consumer_index;

		skb = unmap_array[unmap_cons].skb;
		BUG_ON(!(skb));
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
						next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}
		prefetch(next_cmpl);

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
						ntohs(cmpl->vlan_tag), skb);
			else
				vlan_hwaccel_receive_skb(skb,
							 bnad->vlan_grp,
							 ntohs(cmpl->vlan_tag));

		} else { /* Not VLAN tagged/stripped */
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				napi_gro_receive(&rx_ctrl->napi, skb);
			else
				netif_receive_skb(skb);
		}

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(ccb)) {
		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
			bna_ib_ack(ccb->i_dbell, packets);
		bnad_refill_rxq(bnad, ccb->rcb[0]);
		if (ccb->rcb[1])
			bnad_refill_rxq(bnad, ccb->rcb[1]);
	} else {
		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
			bna_ib_ack(ccb->i_dbell, 0);
	}

	return packets;
}
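
/*
 * A single CQ aggregates completions from up to two RxQs (see
 * bnad_rxqs_per_cq above), so each completion is steered back to
 * rcb[0] or rcb[1] by matching cmpl->rxq_id. Completion fields arrive
 * big-endian from the hardware, hence the ntohs()/ntohl() conversions
 * before any field is interpreted.
 */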

static void
bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		return;

	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
	bna_ib_ack(ccb->i_dbell, 0);
}

static void
bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	unsigned long flags;

	/* Because of polling context */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_enable_rx_irq_unsafe(ccb);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		bnad_disable_rx_irq(bnad, ccb);
		__napi_schedule(napi);
	}
	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
}
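
/*
 * Standard NAPI hand-off: when napi_schedule_prep() wins, Rx interrupts
 * for this CCB are quiesced (coalescing timer zeroed plus a 0-packet
 * ack) and completion processing is deferred to the NAPI poll routine,
 * which is expected to re-enable the interrupt via bnad_enable_rx_irq()
 * once it runs out of work.
 */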

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;
	struct bnad *bnad = ccb->bnad;

	bnad_netif_rx_schedule_poll(bnad, ccb);

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_NONE;

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status))
		return IRQ_NONE;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	if (BNA_IS_MBOX_ERR_INTR(intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}
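
/*
 * bnad_isr() is the shared INTx-mode handler: a single interrupt line
 * carries mailbox, Tx-completion and Rx events, so it first dispatches
 * any mailbox work under bna_lock and then fans the data interrupt out
 * across every TxQ and every Rx completion queue. In MSIX mode the
 * per-queue handlers above make this fan-out unnecessary.
 */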

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held because of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
{
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;
}

void
bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
{
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;
}

static void
bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.port_comp);

	netif_carrier_off(bnad->netdev);
}

void
bnad_cb_port_link_status(struct bnad *bnad,
			enum bna_link_status link_status)
{
	bool link_up = false;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
		BNAD_UPDATE_CTR(bnad, cee_up);
	} else
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
			if (!tcb)
				return;
			pr_warn("bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				/* Force an immediate Transmit Schedule */
				pr_info("bna: %s TX_STARTED\n",
					bnad->netdev->name);
				netif_wake_queue(bnad->netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			} else {
				netif_stop_queue(bnad->netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			pr_warn("bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
			enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}

static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
	bnad_free_all_rxbufs(bnad, rcb);
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	if (tx_info != &bnad->tx_info[0])
		return;

	clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
	netif_stop_queue(bnad->netdev);
	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return;

	clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	/*
	 * Workaround: a failed first device enable can leave us with a
	 * zero MAC address, so try to fetch the MAC address again here.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}

	set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

	if (netif_carrier_ok(bnad->netdev)) {
		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
		netif_wake_queue(bnad->netdev);
		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
	}
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	/* Delay only once for the whole Tx Path Shutdown */
	if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
		mdelay(BNAD_TXRX_SYNC_MDELAY);
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad,
			struct bna_ccb *ccb)
{
	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

	if (ccb->rcb[1])
		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

	if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
		mdelay(BNAD_TXRX_SYNC_MDELAY);
}

static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);

	if (rcb == rcb->cq->ccb->rcb[0])
		bnad_cq_cmpl_init(bnad, rcb->cq->ccb);

	bnad_free_all_rxbufs(bnad, rcb);

	set_bit(BNAD_RXQ_STARTED, &rcb->flags);

	/* Now allocate & post buffers for this RCB */
	/* !!Allocation in callback context */
	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
			enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
				enum bna_cb_status status)
{
	bnad->bnad_completions.mcast_comp_status = status;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						mem_info->len, &dma_pa,
						GFP_KERNEL);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
							GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}
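
/*
 * Two memory flavors are handled here: BNA_MEM_T_DMA descriptors get
 * coherent DMA memory (device-visible rings and the like), while
 * everything else, e.g. the KVA-type unmap queues requested through
 * BNAD_FILL_UNMAPQ_MEM_REQ, is plain kernel memory from kzalloc().
 * bnad_mem_free() mirrors the same split when releasing.
 */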

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad,
		   struct bna_intr_info *intr_info)
{
	int irq;
	unsigned long flags;

	if (intr_info->idl == NULL)
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);

	kfree(intr_info->idl);
}

/*
 * Allocates the IRQ for the mailbox, but keeps it disabled.
 * It will be enabled once we get the mbox enable callback
 * from bna.
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad,
		    struct bna_intr_info *intr_info)
{
	int		err = 0;
	unsigned long	irq_flags, flags;
	u32	irq;
	irq_handler_t	irq_handler;

	/* Mbox should use only 1 vector */

	intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
	if (!intr_info->idl)
		return -ENOMEM;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[bnad->msix_num - 1].vector;
		irq_flags = 0;
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl[0].vector = bnad->msix_num - 1;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
		intr_info->intr_type = BNA_INTR_T_INTX;
		/* intr_info->idl.vector = 0 ? */
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	if (err) {
		kfree(intr_info->idl);
		intr_info->idl = NULL;
	}

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    uint txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = bnad->num_tx * bnad->num_txq_per_tx +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
			break;
		}
	}
	return 0;
}
 | 1226 |  | 
/**
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
			uint tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	/* Vectors 0 .. (i - 1) were registered; free all of them */
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, i);
	return err;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
			uint rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	/* Vectors 0 .. (i - 1) were registered; free all of them */
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, i);
	return err;
}

/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}

/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */
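
/*
 * Illustrative sketch (not driver code) of the guarded-stop pattern
 * the comment above implies; my_dev, MY_TIMER_RUNNING and the member
 * names are hypothetical. The flag is cleared under the same lock the
 * callback takes before re-arming, so step 3 above cannot revive a
 * timer that del_timer_sync() has already killed:
 *
 *	static void my_timer_stop(struct my_dev *dev)
 *	{
 *		int to_del = 0;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&dev->lock, flags);
 *		if (test_and_clear_bit(MY_TIMER_RUNNING, &dev->run_flags))
 *			to_del = 1;
 *		spin_unlock_irqrestore(&dev->lock, flags);
 *		if (to_del)
 *			del_timer_sync(&dev->timer);
 *	}
 *
 * bnad_stats_timer_stop() below is the concrete instance of this.
 */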

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check BNAD_RF_DIM_TIMER_RUNNING; this does not eliminate the race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}

/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0], ETH_ALEN);
		i++;
	}
}

static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bna_ccb *ccb;
	struct bnad *bnad;
	int rcvd = 0;

	ccb = rx_ctrl->ccb;

	bnad = ccb->bnad;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_poll_cq(bnad, ccb, budget);
	if (rcvd == budget)
		return rcvd;

poll_exit:
	napi_complete(napi);

	BNAD_UPDATE_CTR(bnad, netif_rx_complete);

	bnad_enable_rx_irq(bnad, ccb);
	return rcvd;
}
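
/*
 * Sketch of the NAPI contract the poll routine above follows (my_ring
 * and the my_* helpers are hypothetical): a poll callback that
 * consumed its full budget must return the budget and stay scheduled;
 * one that ran out of work completes NAPI and re-enables the device
 * interrupt, exactly as bnad_napi_poll_rx() does via
 * bnad_enable_rx_irq():
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_ring *ring =
 *			container_of(napi, struct my_ring, napi);
 *		int done = my_clean_rx(ring, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			my_enable_irq(ring);
 *		}
 *		return done;
 *	}
 */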

static void
bnad_napi_enable(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];

		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, 64);

		napi_enable(&rx_ctrl->napi);
	}
}

static void
bnad_napi_disable(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
	}
}

/* Should be called with conf_lock held */
void
bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
			bnad->num_txq_per_tx);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;

	if (0 == tx_id)
		tasklet_kill(&bnad->tx_free_tasklet);

	bnad_tx_res_free(bnad, res_info);
}

/* Should be called with conf_lock held */
int
bnad_setup_tx(struct bnad *bnad, uint tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	struct bna_tx_event_cbfn tx_cbfn;
	struct bna_tx *tx;
	unsigned long flags;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;

	/* Initialize the tx event handlers */
	tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
	tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
	tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
	tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
	tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
			bnad->num_txq_per_tx,
			BNAD_TX_UNMAPQ_DEPTH);

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx) {
		/* err would otherwise still be 0 here */
		err = -ENOMEM;
		goto err_return;
	}
	tx_info->tx = tx;

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
			tx_id, bnad->num_txq_per_tx);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
				(BFI_RSS_T_V4_TCP |
				 BFI_RSS_T_V6_TCP |
				 BFI_RSS_T_V4_IP  |
				 BFI_RSS_T_V6_IP);
		rx_config->rss_config.hash_mask =
				bnad->num_rxp_per_rx - 1;
		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
			sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));
	}
	rx_config->rxp_type = BNA_RXP_SLR;
	rx_config->q_depth = bnad->rxq_depth;

	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;

	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}
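
/*
 * Worked example (illustrative): with num_rxp_per_rx == 4 the code
 * above sets hash_mask = 3, so a flow whose Toeplitz hash comes out
 * as 0x1d66 is steered to CQ (0x1d66 & 3) == 2. The mask arithmetic
 * assumes num_rxp_per_rx is a power of two; a non-power-of-two path
 * count would leave some queues unreachable.
 */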

/* Called with mutex_lock(&bnad->conf_mutex) held */
void
bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	unsigned long flags;
	int dim_timer_del = 0;

	if (!rx_info->rx)
		return;

	if (0 == rx_id) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		dim_timer_del = bnad_dim_timer_running(bnad);
		if (dim_timer_del)
			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		if (dim_timer_del)
			del_timer_sync(&bnad->dim_timer);
	}

	bnad_napi_disable(bnad, rx_id);

	init_completion(&bnad->bnad_completions.rx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.rx_comp);

	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_destroy(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	rx_info->rx = NULL;

	bnad_rx_res_free(bnad, res_info);
}

/* Called with mutex_lock(&bnad->conf_mutex) held */
int
bnad_setup_rx(struct bnad *bnad, uint rx_id)
{
	int err;
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_rx_event_cbfn rx_cbfn;
	struct bna_rx *rx;
	unsigned long flags;

	/* Initialize the Rx object configuration */
	bnad_init_rx_config(bnad, rx_config);

	/* Initialize the Rx event handlers */
	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
	rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
	rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
	rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;

	/* Get BNA's resource requirement for one Rx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_res_req(rx_config, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
			rx_config->num_paths +
			((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
				rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);

	/* Allocate resource */
	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
	if (err)
		return err;

	/* Ask BNA to create one Rx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
			rx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!rx) {
		/* err would otherwise still be 0 here */
		err = -ENOMEM;
		goto err_return;
	}
	rx_info->rx = rx;

	/* Register ISR for the Rx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
						rx_config->num_paths);
		if (err)
			goto err_return;
	}

	/* Enable NAPI */
	bnad_napi_enable(bnad, rx_id);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (0 == rx_id) {
		/* Set up Dynamic Interrupt Moderation Vector */
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);

		/* Enable VLAN filtering only on the default Rx */
		bna_rx_vlanfilter_enable(rx);

		/* Start the DIM timer */
		bnad_dim_timer_start(bnad);
	}

	bna_rx_enable(rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_cleanup_rx(bnad, rx_id);
	return err;
}

/* Called with conf_lock & bnad->bna_lock held */
void
bnad_tx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_tx_info *tx_info;

	tx_info = &bnad->tx_info[0];
	if (!tx_info->tx)
		return;

	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
}

/* Called with conf_lock & bnad->bna_lock held */
void
bnad_rx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info;
	int i;

	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		bna_rx_coalescing_timeo_set(rx_info->rx,
				bnad->rx_coalescing_timeo);
	}
}

/*
 * Called with bnad->bna_lock held
 */
static int
bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
{
	int ret;

	if (!is_valid_ether_addr(mac_addr))
		return -EADDRNOTAVAIL;

	/* If datapath is down, pretend everything went through */
	if (!bnad->rx_info[0].rx)
		return 0;

	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
	if (ret != BNA_CB_SUCCESS)
		return -EADDRNOTAVAIL;

	return 0;
}

/* Should be called with conf_lock held */
static int
bnad_enable_default_bcast(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
	int ret;
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mcast_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
				bnad_cb_rx_mcast_add);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (ret == BNA_CB_SUCCESS)
		wait_for_completion(&bnad->bnad_completions.mcast_comp);
	else
		return -ENODEV;

	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
		return -ENODEV;

	return 0;
}
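
/*
 * The function above is one instance of the driver's generic wait
 * pattern: arm a struct completion, fire an asynchronous BNA call
 * whose callback signals it, then block. Reduced to a sketch with
 * hypothetical names (done, start_async_op, my_done_cbfn):
 *
 *	init_completion(&done);
 *	ret = start_async_op(obj, my_done_cbfn);
 *	if (ret != BNA_CB_SUCCESS)
 *		return -ENODEV;
 *	wait_for_completion(&done);
 */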

/* Called with bnad_conf_lock() held */
static void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
	u16 vlan_id;
	unsigned long flags;

	if (!bnad->vlan_grp)
		return;

	BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));

	for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
		if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
			continue;
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
}

/* Statistics utilities */
void
bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	int i, j;

	for (i = 0; i < bnad->num_rx; i++) {
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			struct bna_ccb *ccb = bnad->rx_info[i].rx_ctrl[j].ccb;

			if (!ccb)
				continue;

			stats->rx_packets += ccb->rcb[0]->rxq->rx_packets;
			stats->rx_bytes += ccb->rcb[0]->rxq->rx_bytes;
			if (ccb->rcb[1] && ccb->rcb[1]->rxq) {
				stats->rx_packets +=
					ccb->rcb[1]->rxq->rx_packets;
				stats->rx_bytes +=
					ccb->rcb[1]->rxq->rx_bytes;
			}
		}
	}
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			struct bna_tcb *tcb = bnad->tx_info[i].tcb[j];

			if (!tcb)
				continue;

			stats->tx_packets += tcb->txq->tx_packets;
			stats->tx_bytes += tcb->txq->tx_bytes;
		}
	}
}

/*
 * Must be called with the bna_lock held.
 */
void
bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	struct bfi_ll_stats_mac *mac_stats;
	u64 bmap;
	int i;

	mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
	stats->rx_errors =
		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
		mac_stats->rx_undersize;
	stats->tx_errors = mac_stats->tx_fcs_error +
					mac_stats->tx_undersize;
	stats->rx_dropped = mac_stats->rx_drop;
	stats->tx_dropped = mac_stats->tx_drop;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_total_collision;

	stats->rx_length_errors = mac_stats->rx_frame_length_error;

	/* receive ring buffer overflow  ?? */

	stats->rx_crc_errors = mac_stats->rx_fcs_error;
	stats->rx_frame_errors = mac_stats->rx_alignment_error;
	/* receiver fifo overrun */
	bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
		((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
		if (bmap & 1) {
			stats->rx_fifo_errors +=
				bnad->stats.bna_stats->
					hw_stats->rxf_stats[i].frame_drops;
			break;
		}
		bmap >>= 1;
	}
}

static void
bnad_mbox_irq_sync(struct bnad *bnad)
{
	u32 irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		irq = bnad->msix_table[bnad->msix_num - 1].vector;
	else
		irq = bnad->pcidev->irq;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	synchronize_irq(irq);
}

/* Utility used by bnad_start_xmit, for doing TSO */
static int
bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
	int err;

	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 have been defined since 2.6.18. */
	BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
		   skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			BNAD_UPDATE_CTR(bnad, tso_err);
			return err;
		}
	}

	/*
	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
	 * excluding the length field.
	 */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		/* Do we really need these? */
		iph->tot_len = 0;
		iph->check = 0;

		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
					   IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso4);
	} else {
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
		ipv6h->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
					 IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso6);
	}

	return 0;
}
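
/*
 * Illustrative arithmetic for the IPv4 branch above: the seed written
 * into tcp_hdr(skb)->check is ~csum(saddr + daddr + IPPROTO_TCP),
 * i.e. the TCP pseudo-header sum with the length term left out. The
 * hardware later folds each segment's own TCP length into the
 * checksum as it slices the LSO frame into MSS-sized segments, which
 * is also why tot_len and check are zeroed first.
 */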

/*
 * Initialize Q numbers depending on Rx Paths
 * Called with bnad->bna_lock held, because of cfg_flags
 * access.
 */
static void
bnad_q_num_init(struct bnad *bnad)
{
	int rxps;

	rxps = min((uint)num_online_cpus(),
			(uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));

	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
		rxps = 1;	/* INTx */

	bnad->num_rx = 1;
	bnad->num_tx = 1;
	bnad->num_rxp_per_rx = rxps;
	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
}

/*
 * Adjusts the Q numbers, given a number of msix vectors.
 * Give preference to RSS as opposed to Tx priority Queues;
 * in such a case, just use 1 Tx Q.
 * Called with bnad->bna_lock held because of cfg_flags access.
 */
static void
bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
{
	bnad->num_txq_per_tx = 1;
	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
		bnad->num_rxp_per_rx = msix_vectors -
			(bnad->num_tx * bnad->num_txq_per_tx) -
			BNAD_MAILBOX_MSIX_VECTORS;
	} else
		bnad->num_rxp_per_rx = 1;
}
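
/*
 * Worked example (illustrative numbers, assuming
 * BNAD_MAILBOX_MSIX_VECTORS == 1): if 8 vectors were granted with
 * num_tx * num_txq_per_tx == 1, the test above passes
 * (8 >= 1 + bnad_rxqs_per_cq + 1) and num_rxp_per_rx becomes
 * 8 - 1 - 1 = 6; a grant below that threshold falls back to a
 * single Rx path.
 */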

/* Enable / disable device */
static void
bnad_device_disable(struct bnad *bnad)
{
	unsigned long flags;

	init_completion(&bnad->bnad_completions.ioc_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.ioc_comp);
}

static int
bnad_device_enable(struct bnad *bnad)
{
	int err = 0;
	unsigned long flags;

	init_completion(&bnad->bnad_completions.ioc_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_device_enable(&bnad->bna.device);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.ioc_comp);

	if (bnad->bnad_completions.ioc_comp_status)
		err = bnad->bnad_completions.ioc_comp_status;

	return err;
}

/* Free BNA resources */
static void
bnad_res_free(struct bnad *bnad)
{
	int i;
	struct bna_res_info *res_info = &bnad->res_info[0];

	for (i = 0; i < BNA_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else
			bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for BNA */
static int
bnad_res_alloc(struct bnad *bnad)
{
	int i, err;
	struct bna_res_info *res_info = &bnad->res_info[0];

	for (i = 0; i < BNA_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
		else
			err = bnad_mbox_irq_alloc(bnad,
						  &res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_res_free(bnad);
	return err;
}

/* Interrupt enable / disable */
static void
bnad_enable_msix(struct bnad *bnad)
{
	int i, ret;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (bnad->msix_table)
		return;

	bnad->msix_table =
		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);

	if (!bnad->msix_table)
		goto intx_mode;

	for (i = 0; i < bnad->msix_num; i++)
		bnad->msix_table[i].entry = i;

	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
	if (ret > 0) {
		/* Not enough MSI-X vectors. */

		spin_lock_irqsave(&bnad->bna_lock, flags);
		/* ret = #of vectors that we got */
		bnad_q_num_adjust(bnad, ret);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
			(bnad->num_rx * bnad->num_rxp_per_rx) +
			BNAD_MAILBOX_MSIX_VECTORS;

		/* Try once more with adjusted numbers */
		/* If this fails, fall back to INTx */
		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
				      bnad->msix_num);
		if (ret)
			goto intx_mode;

	} else if (ret < 0)
		goto intx_mode;
	return;

intx_mode:

	kfree(bnad->msix_table);
	bnad->msix_table = NULL;
	bnad->msix_num = 0;
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad->cfg_flags &= ~BNAD_CF_MSIX;
	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
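
/*
 * Return-value convention relied on above: pci_enable_msix() of this
 * era returns 0 on success, a negative errno on hard failure, and a
 * positive number of actually available vectors when the request was
 * too large. Skeleton of the shrink-and-retry idiom (names other than
 * pci_enable_msix() are hypothetical):
 *
 *	ret = pci_enable_msix(pdev, table, want);
 *	if (ret > 0) {
 *		want = shrink_request(ret);
 *		ret = pci_enable_msix(pdev, table, want);
 *	}
 *	if (ret)
 *		fall_back_to_intx();
 */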

static void
bnad_disable_msix(struct bnad *bnad)
{
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		bnad->cfg_flags &= ~BNAD_CF_MSIX;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		pci_disable_msix(bnad->pcidev);
		kfree(bnad->msix_table);
		bnad->msix_table = NULL;
	}
}

/* Netdev entry points */
static int
bnad_open(struct net_device *netdev)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	int mtu;
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Tx */
	err = bnad_setup_tx(bnad, 0);
	if (err)
		goto err_return;

	/* Rx */
	err = bnad_setup_rx(bnad, 0);
	if (err)
		goto cleanup_tx;

	/* Port */
	pause_config.tx_pause = 0;
	pause_config.rx_pause = 0;

	mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
	bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
	bna_port_enable(&bnad->bna.port);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Enable broadcast */
	bnad_enable_default_bcast(bnad);

	/* Restore VLANs, if any */
	bnad_restore_vlans(bnad, 0);

	/* Set the UCAST address */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Start the stats timer */
	bnad_stats_timer_start(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;

cleanup_tx:
	bnad_cleanup_tx(bnad, 0);

err_return:
	mutex_unlock(&bnad->conf_mutex);
	return err;
}

static int
bnad_stop(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Stop the stats timer */
	bnad_stats_timer_stop(bnad);

	init_completion(&bnad->bnad_completions.port_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
			bnad_cb_port_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.port_comp);

	bnad_cleanup_tx(bnad, 0);
	bnad_cleanup_rx(bnad, 0);

	/* Synchronize mailbox IRQ */
	bnad_mbox_irq_sync(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
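
/*
 * bnad_open() and bnad_stop() are reached through the adapter's
 * net_device_ops, set up elsewhere in this file. A minimal sketch of
 * that wiring (the struct shown is an assumption for illustration,
 * not a quote of the driver's actual ops table):
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_open	= bnad_open,
 *		.ndo_stop	= bnad_stop,
 *		.ndo_start_xmit	= bnad_start_xmit,
 *	};
 */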

/* TX */
/*
 * bnad_start_xmit : Netdev entry point for Transmit
 *		     Called under lock held by net_device
 */
static netdev_tx_t
bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);

	u16 txq_prod, vlan_tag = 0;
	u32 unmap_prod, wis, wis_used, wi_range;
	u32 vectors, vect_id, i, acked;
	u32 tx_id;
	int err;

	struct bnad_tx_info *tx_info;
	struct bna_tcb *tcb;
	struct bnad_unmap_q *unmap_q;
	dma_addr_t dma_addr;
	struct bna_txq_entry *txqent;
	bna_txq_wi_ctrl_flag_t flags;

	if (unlikely
	    (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
 | 2461 |  | 
| Rasesh Mody | 8b230ed | 2010-08-23 20:24:12 -0700 | [diff] [blame] | 2462 | 	tx_id = 0; | 
 | 2463 |  | 
 | 2464 | 	tx_info = &bnad->tx_info[tx_id]; | 
 | 2465 | 	tcb = tx_info->tcb[tx_id]; | 
 | 2466 | 	unmap_q = tcb->unmap_q; | 
 | 2467 |  | 
| Rasesh Mody | be7fa32 | 2010-12-23 21:45:01 +0000 | [diff] [blame] | 2468 | 	/* | 
 | 2469 | 	 * Takes care of the Tx that is scheduled between clearing the flag | 
 | 2470 | 	 * and the netif_stop_queue() call. | 
 | 2471 | 	 */ | 
 | 2472 | 	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { | 
 | 2473 | 		dev_kfree_skb(skb); | 
 | 2474 | 		return NETDEV_TX_OK; | 
 | 2475 | 	} | 
 | 2476 |  | 
| Rasesh Mody | 8b230ed | 2010-08-23 20:24:12 -0700 | [diff] [blame] | 2477 | 	vectors = 1 + skb_shinfo(skb)->nr_frags; | 
 | 2478 | 	if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) { | 
 | 2479 | 		dev_kfree_skb(skb); | 
 | 2480 | 		return NETDEV_TX_OK; | 
 | 2481 | 	} | 
 | 2482 | 	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */ | 
 | 2483 | 	acked = 0; | 
 | 2484 | 	if (unlikely | 
 | 2485 | 	    (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) || | 
 | 2486 | 	     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) { | 
 | 2487 | 		if ((u16) (*tcb->hw_consumer_index) != | 
 | 2488 | 		    tcb->consumer_index && | 
 | 2489 | 		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { | 
 | 2490 | 			acked = bnad_free_txbufs(bnad, tcb); | 
| Rasesh Mody | be7fa32 | 2010-12-23 21:45:01 +0000 | [diff] [blame] | 2491 | 			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | 
 | 2492 | 				bna_ib_ack(tcb->i_dbell, acked); | 
| Rasesh Mody | 8b230ed | 2010-08-23 20:24:12 -0700 | [diff] [blame] | 2493 | 			smp_mb__before_clear_bit(); | 
 | 2494 | 			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); | 
 | 2495 | 		} else { | 
 | 2496 | 			netif_stop_queue(netdev); | 
 | 2497 | 			BNAD_UPDATE_CTR(bnad, netif_queue_stop); | 
 | 2498 | 		} | 
 | 2499 |  | 
 | 2500 | 		smp_mb(); | 
 | 2501 | 		/* | 
 | 2502 | 		 * Check again to deal with race condition between | 
 | 2503 | 		 * netif_stop_queue here, and netif_wake_queue in | 
 | 2504 | 		 * interrupt handler which is not inside netif tx lock. | 
 | 2505 | 		 */ | 
 | 2506 | 		if (likely | 
 | 2507 | 		    (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) || | 
 | 2508 | 		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) { | 
 | 2509 | 			BNAD_UPDATE_CTR(bnad, netif_queue_stop); | 
 | 2510 | 			return NETDEV_TX_BUSY; | 
 | 2511 | 		} else { | 
 | 2512 | 			netif_wake_queue(netdev); | 
 | 2513 | 			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); | 
 | 2514 | 		} | 
 | 2515 | 	} | 
 | 2516 |  | 
 | 2517 | 	unmap_prod = unmap_q->producer_index; | 
 | 2518 | 	wis_used = 1; | 
 | 2519 | 	vect_id = 0; | 
 | 2520 | 	flags = 0; | 
 | 2521 |  | 
 | 2522 | 	txq_prod = tcb->producer_index; | 
 | 2523 | 	BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range); | 
 | 2524 | 	BUG_ON(!(wi_range <= tcb->q_depth)); | 
 | 2525 | 	txqent->hdr.wi.reserved = 0; | 
 | 2526 | 	txqent->hdr.wi.num_vectors = vectors; | 
 | 2527 | 	txqent->hdr.wi.opcode = | 
 | 2528 | 		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO : | 
 | 2529 | 		       BNA_TXQ_WI_SEND)); | 
 | 2530 |  | 
| Jesse Gross | eab6d18 | 2010-10-20 13:56:03 +0000 | [diff] [blame] | 2531 | 	if (vlan_tx_tag_present(skb)) { | 
| Rasesh Mody | 8b230ed | 2010-08-23 20:24:12 -0700 | [diff] [blame] | 2532 | 		vlan_tag = (u16) vlan_tx_tag_get(skb); | 
 | 2533 | 		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); | 
 | 2534 | 	} | 
 | 2535 | 	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { | 
 | 2536 | 		vlan_tag = | 
 | 2537 | 			(tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff); | 
 | 2538 | 		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); | 
 | 2539 | 	} | 
 | 2540 |  | 
 | 2541 | 	txqent->hdr.wi.vlan_tag = htons(vlan_tag); | 
 | 2542 |  | 
	if (skb_is_gso(skb)) {
		err = bnad_tso_prepare(bnad, skb);
		if (err) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		/* skb_is_gso() returns gso_size; state it explicitly */
		txqent->hdr.wi.lso_mss = htons(skb_shinfo(skb)->gso_size);
		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
		txqent->hdr.wi.l4_hdr_size_n_offset =
			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
			      (tcp_hdrlen(skb) >> 2,
			       skb_transport_offset(skb)));
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 proto = 0;

		txqent->hdr.wi.lso_mss = 0;

		if (skb->protocol == htons(ETH_P_IP))
			proto = ip_hdr(skb)->protocol;
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			/* nexthdr may not be TCP immediately. */
			proto = ipv6_hdr(skb)->nexthdr;
		}
		if (proto == IPPROTO_TCP) {
			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
			txqent->hdr.wi.l4_hdr_size_n_offset =
				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
				      (0, skb_transport_offset(skb)));

			BNAD_UPDATE_CTR(bnad, tcpcsum_offload);

			BUG_ON(skb_headlen(skb) <
			       skb_transport_offset(skb) + tcp_hdrlen(skb));

		} else if (proto == IPPROTO_UDP) {
			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
			txqent->hdr.wi.l4_hdr_size_n_offset =
				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
				      (0, skb_transport_offset(skb)));

			BNAD_UPDATE_CTR(bnad, udpcsum_offload);

			BUG_ON(skb_headlen(skb) <
			       skb_transport_offset(skb) +
			       sizeof(struct udphdr));
		} else {
			err = skb_checksum_help(skb);
			BNAD_UPDATE_CTR(bnad, csum_help);
			if (err) {
				dev_kfree_skb(skb);
				BNAD_UPDATE_CTR(bnad, csum_help_err);
				return NETDEV_TX_OK;
			}
		}
	} else {
		txqent->hdr.wi.lso_mss = 0;
		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
	}

	txqent->hdr.wi.flags = htons(flags);

	txqent->hdr.wi.frame_length = htonl(skb->len);

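	/*
	 * DMA-map the linear part of the skb as vector 0 and record
	 * the mapping in the unmap array so the completion path can
	 * dma_unmap and free it later.
	 */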
	unmap_q->unmap_array[unmap_prod].skb = skb;
	BUG_ON(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR);
	txqent->vector[vect_id].length = htons(skb_headlen(skb));
	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
			   dma_addr);

	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

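	/*
	 * Map each page fragment into the next vector slot. When a WI
	 * fills up, advance to the next WI in the queue page table
	 * and mark it as an extension of the send WI started above.
	 */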
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;

		if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
			vect_id = 0;
			if (--wi_range)
				txqent++;
			else {
				BNA_QE_INDX_ADD(txq_prod, wis_used,
						tcb->q_depth);
				wis_used = 0;
				BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
						     txqent, wi_range);
				BUG_ON(wi_range > tcb->q_depth);
			}
			wis_used++;
			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
		}

		BUG_ON(size > BFI_TX_MAX_DATA_PER_VECTOR);
		txqent->vector[vect_id].length = htons(size);
		dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
					frag->page_offset, size, DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
	}

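	/*
	 * Publish the new producer indices, then use smp_mb() to
	 * order the queue updates against the BNAD_TXQ_TX_STARTED
	 * test below before ringing the doorbell; a stopped TxQ must
	 * not be kicked.
	 */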
	unmap_q->producer_index = unmap_prod;
	BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
	tcb->producer_index = txq_prod;

	smp_mb();

	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		return NETDEV_TX_OK;

	bna_txq_prod_indx_doorbell(tcb);

	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
		tasklet_schedule(&bnad->tx_free_tasklet);

	return NETDEV_TX_OK;
}

/*
 * A spin_lock is used to synchronize reading of the stats structures,
 * which are written by BNA under the same lock.
 */
static struct rtnl_link_stats64 *
bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_netdev_qstats_fill(bnad, stats);
	bnad_netdev_hwstats_fill(bnad, stats);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return stats;
}

static void
bnad_set_rx_mode(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32	new_mask, valid_mask;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	new_mask = valid_mask = 0;

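	/*
	 * new_mask carries the desired on/off state of each Rx-mode
	 * bit; valid_mask marks which bits bna_rx_mode_set() should
	 * actually act on, so modes that did not change are left
	 * untouched.
	 */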
	if (netdev->flags & IFF_PROMISC) {
		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags |= BNAD_CF_PROMISC;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
		}
	}

	if (netdev->flags & IFF_ALLMULTI) {
		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
			new_mask |= BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
			new_mask &= ~BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
		}
	}

	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);

	if (!netdev_mc_empty(netdev)) {
		u8 *mcaddr_list;
		int mc_count = netdev_mc_count(netdev);

		/* Index 0 holds the broadcast address */
		mcaddr_list = kzalloc((mc_count + 1) * ETH_ALEN,
				      GFP_ATOMIC);
		if (!mcaddr_list)
			goto unlock;

		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);

		/* Copy the rest of the MC addresses */
		bnad_netdev_mc_list_get(netdev, mcaddr_list);

		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
				     mcaddr_list, NULL);

		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
		kfree(mcaddr_list);
	}
unlock:
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * bna_lock is used to sync writes to netdev->addr.
 * conf_lock cannot be used since this call may be made
 * in a non-blocking context.
 */
static int
bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct sockaddr *sa = (struct sockaddr *)mac_addr;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);

	if (!err)
		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return err;
}

static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
	int mtu, err = 0;
	unsigned long flags;

	struct bnad *bnad = netdev_priv(netdev);

	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);

	netdev->mtu = new_mtu;

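	/*
	 * The hardware is programmed with the full on-wire frame
	 * size: Ethernet header + payload + FCS. For example, a
	 * new_mtu of 9000 yields 14 + 9000 + 4 = 9018 bytes.
	 */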
	mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	return err;
}

static void
bnad_vlan_rx_register(struct net_device *netdev,
		      struct vlan_group *vlan_grp)
{
	struct bnad *bnad = netdev_priv(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad->vlan_grp = vlan_grp;
	mutex_unlock(&bnad->conf_mutex);
}

static void
bnad_vlan_rx_add_vid(struct net_device *netdev,
		     unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}

static void
bnad_vlan_rx_kill_vid(struct net_device *netdev,
		      unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	u32 curr_mask;
	int i, j;

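	/*
	 * In INTx mode, mask interrupts, run the ISR by hand, and
	 * restore the saved mask (bna_intx_disable() appears to fill
	 * curr_mask with the pre-disable value). In MSIX mode, kick
	 * NAPI polling on every Rx path instead.
	 */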
	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		bna_intx_disable(&bnad->bna, curr_mask);
		bnad_isr(bnad->pcidev->irq, netdev);
		bna_intx_enable(&bnad->bna, curr_mask);
	} else {
		for (i = 0; i < bnad->num_rx; i++) {
			rx_info = &bnad->rx_info[i];
			if (!rx_info->rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				rx_ctrl = &rx_info->rx_ctrl[j];
				if (rx_ctrl->ccb) {
					bnad_disable_rx_irq(bnad,
							    rx_ctrl->ccb);
					bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
				}
			}
		}
	}
}
#endif

static const struct net_device_ops bnad_netdev_ops = {
	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats64	= bnad_get_stats64,
	.ndo_set_rx_mode	= bnad_set_rx_mode,
	.ndo_set_multicast_list = bnad_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_register   = bnad_vlan_rx_register,
	.ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = bnad_netpoll
#endif
};

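/*
 * hw_features lists the offloads that ethtool may toggle at runtime;
 * netdev->features additionally keeps VLAN RX/filter always enabled,
 * since this driver exposes no way to switch them off.
 */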
static void
bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
	struct net_device *netdev = bnad->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->mem_start = bnad->mmio_start;
	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	netdev->netdev_ops = &bnad_netdev_ops;
	bnad_set_ethtool_ops(netdev);
}

/*
 * 1. Initialize the bnad structure
 * 2. Set up the netdev pointer in pci_dev
 * 3. Initialize the Tx free tasklet
 * 4. Initialize the no. of TxQs & CQs & MSIX vectors
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		pci_set_drvdata(pdev, NULL);
		return -ENOMEM;
	}
	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
		(unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

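	/*
	 * One MSIX vector per TxQ and per RxP, plus the mailbox
	 * vector(s). As an illustration (hypothetical counts): one Tx
	 * path with one TxQ, one Rx path with two RxPs, and one
	 * mailbox vector need 1 + 2 + 1 = 4 vectors.
	 */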
	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
		BNAD_MAILBOX_MSIX_VECTORS;

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
		     (unsigned long)bnad);

	return 0;
}

/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happen only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->bar0)
		iounmap(bnad->bar0);
	pci_set_drvdata(bnad->pcidev, NULL);
}

/*
 * Initialize locks
 *	a) Per-device mutex used for serializing configuration
 *	   changes from the OS interface
 *	b) Spin lock used to protect the bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
}

/* PCI initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
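	/*
	 * Prefer a 64-bit DMA mask (reported back through *using_dac
	 * so HIGHDMA can be advertised); fall back to 32-bit masks if
	 * the platform refuses.
	 */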
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err)
				goto release_regions;
		}
		*using_dac = 0;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}

static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
	       const struct pci_device_id *pcidev_id)
{
	bool	using_dac = false;
	int	err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
		pdev, pcidev_id, PCI_FUNC(pdev->devfn));

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
	 * bnad = netdev_priv(netdev)
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);

	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64-bit DMA
	 *			   = 0 for 32-bit DMA
	 */
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto free_netdev;

	bnad_lock_init(bnad);
	/*
	 * Initialize the bnad structure,
	 * set up the relation between pci_dev & netdev,
	 * and init the Tx free tasklet.
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;
	/* Initialize the netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	/* Set the link to the down state */
	netif_carrier_off(netdev);

	bnad_enable_msix(bnad);

	/* Get the resource requirement from bna */
	bna_res_req(&bnad->res_info[0]);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad);
	if (err)
		goto free_netdev;

	bna = &bnad->bna;

	/* Set up pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	/* Set up timers */
	setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
		    ((unsigned long)bnad));

	/* Now start the timer before calling IOC */
	mod_timer(&bnad->bna.device.ioc.iocpf_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));

	/*
	 * Start the chip.
	 * Don't care even if err != 0; the bna state machine
	 * will deal with it.
	 */
	err = bnad_device_enable(bnad);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_mac_get(&bna->port, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with the net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto disable_device;
	}

	return 0;

disable_device:
	mutex_lock(&bnad->conf_mutex);
	bnad_device_disable(bnad);
	del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.device.ioc.sem_timer);
	del_timer_sync(&bnad->bna.device.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	mutex_unlock(&bnad->conf_mutex);

	bnad_res_free(bnad);
	bnad_disable_msix(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
free_netdev:
	free_netdev(netdev);
	return err;
}

static void __devexit
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_device_disable(bnad);
	del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.device.ioc.sem_timer);
	del_timer_sync(&bnad->bna.device.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	mutex_unlock(&bnad->conf_mutex);

	bnad_res_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}

static const struct pci_device_id bnad_pci_id_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	}, {0, }
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);

static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = __devexit_p(bnad_pci_remove),
};

static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver\n");

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
		       "(%d)\n", err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);

	if (bfi_fw)
		release_firmware(bfi_fw);
}

module_init(bnad_module_init);
module_exit(bnad_module_exit);

MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);