/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "workarounds.h"

/*
 * TX descriptor ring full threshold
 *
 * The tx_queue descriptor ring fill-level must fall below this value
 * before we restart the netif queue
 */
#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
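/* Worked example (illustrative): assuming a ring of EFX_TXQ_SIZE 1024
 * entries, so EFX_TXQ_MASK is 1023, the threshold is 511: a stopped
 * queue is not restarted until fewer than 511 descriptors remain in
 * use.  (The actual ring size is configured elsewhere in the driver;
 * 1024 here is only for illustration.)
 */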

/* We need to be able to nest calls to netif_tx_stop_queue(), partly
 * because of the 2 hardware queues associated with each core queue,
 * but also so that we can inhibit TX for reasons other than a full
 * hardware queue. */
void efx_stop_queue(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	if (!channel->tx_queue)
		return;

	spin_lock_bh(&channel->tx_stop_lock);
	netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");

	atomic_inc(&channel->tx_stop_count);
	netif_tx_stop_queue(
		netdev_get_tx_queue(
			efx->net_dev,
			channel->tx_queue->queue / EFX_TXQ_TYPES));

	spin_unlock_bh(&channel->tx_stop_lock);
}

/* Decrement core TX queue stop count and wake it if the count is 0 */
void efx_wake_queue(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	if (!channel->tx_queue)
		return;

	local_bh_disable();
	if (atomic_dec_and_lock(&channel->tx_stop_count,
				&channel->tx_stop_lock)) {
		netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
		netif_tx_wake_queue(
			netdev_get_tx_queue(
				efx->net_dev,
				channel->tx_queue->queue / EFX_TXQ_TYPES));
		spin_unlock(&channel->tx_stop_lock);
	}
	local_bh_enable();
}
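/* Usage sketch (illustrative): every efx_stop_queue(channel) must be
 * balanced by an efx_wake_queue(channel).  Because the stop count
 * nests, two concurrent reasons for inhibiting TX (e.g. both hardware
 * queues filling up) keep the core queue stopped until both callers
 * have woken it; atomic_dec_and_lock() only succeeds on the final
 * decrement to zero.
 */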

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer)
{
	if (buffer->unmap_len) {
		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
					 buffer->unmap_len);
		if (buffer->unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
				       PCI_DMA_TODEVICE);
		buffer->unmap_len = 0;
		buffer->unmap_single = false;
	}

	if (buffer->skb) {
		dev_kfree_skb_any((struct sk_buff *) buffer->skb);
		buffer->skb = NULL;
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}
}

/**
 * struct efx_tso_header - a DMA mapped buffer for packet headers
 * @next: Linked list of free ones.
 *	The list is protected by the TX queue lock.
 * @unmap_len: Length to unmap for an oversize buffer, or 0.
 * @dma_addr: The DMA address of the header below.
 *
 * This controls the memory used for a TSO header.  Use TSOH_BUFFER()
 * to find the packet header data.  Use TSOH_SIZE() to calculate the
 * total size required for a given packet header length.  TSO headers
 * in the free list are exactly %TSOH_STD_SIZE bytes in size.
 */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	dma_addr_t dma_addr;
};

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh);

static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
			  struct efx_tx_buffer *buffer)
{
	if (buffer->tsoh) {
		if (likely(!buffer->tsoh->unmap_len)) {
			buffer->tsoh->next = tx_queue->tso_headers_free;
			tx_queue->tso_headers_free = buffer->tsoh;
		} else {
			efx_tsoh_heap_free(tx_queue, buffer->tsoh);
		}
		buffer->tsoh = NULL;
	}
}


static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & 0xfff) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}
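/* Worked example (illustrative): for a buffer mapped 0xf00 bytes into
 * a 4K-aligned region, (~dma_addr & 0xfff) + 1 = 0x100, so at most
 * 256 bytes go into this descriptor and the remainder continues in
 * the next one, keeping every descriptor within a 4K boundary.
 */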

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct pci_dev *pci_dev = efx->pci_dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	struct page *page;
	int page_offset;
	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	bool unmap_single;
	int q_space, i = 0;
	netdev_tx_t rc = NETDEV_TX_OK;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	q_space = EFX_TXQ_MASK - 1 - fill_level;

	/* Map for DMA.  Use pci_map_single rather than pci_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	unmap_single = true;
	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
			goto pci_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			if (unlikely(q_space-- <= 0)) {
				/* It might be that completions have
				 * happened since the xmit path last
				 * checked.  Update the xmit path's
				 * copy of read_count.
				 */
				++tx_queue->stopped;
				/* This memory barrier protects the
				 * change of stopped from the access
				 * of read_count. */
				smp_mb();
				tx_queue->old_read_count =
					*(volatile unsigned *)
					&tx_queue->read_count;
				fill_level = (tx_queue->insert_count
					      - tx_queue->old_read_count);
				q_space = EFX_TXQ_MASK - 1 - fill_level;
				if (unlikely(q_space-- <= 0))
					goto stop;
				smp_mb();
				--tx_queue->stopped;
			}

			insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
			buffer = &tx_queue->buffer[insert_ptr];
			efx_tsoh_free(tx_queue, buffer);
			EFX_BUG_ON_PARANOID(buffer->tsoh);
			EFX_BUG_ON_PARANOID(buffer->skb);
			EFX_BUG_ON_PARANOID(buffer->len);
			EFX_BUG_ON_PARANOID(!buffer->continuation);
			EFX_BUG_ON_PARANOID(buffer->unmap_len);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->unmap_single = unmap_single;
		buffer->unmap_len = unmap_len;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = fragment->size;
		page = fragment->page;
		page_offset = fragment->page_offset;
		i++;
		/* Map for DMA */
		unmap_single = false;
		dma_addr = pci_map_page(pci_dev, page, page_offset, len,
					PCI_DMA_TODEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
	buffer->skb = skb;
	buffer->continuation = false;

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	return NETDEV_TX_OK;

 pci_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc = NETDEV_TX_BUSY;

	if (tx_queue->stopped == 1)
		efx_stop_queue(tx_queue->channel);

 unwind:
	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[insert_ptr];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->len = 0;
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (unmap_single)
			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
					 PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
				       PCI_DMA_TODEVICE);
	}

	return rc;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & EFX_TXQ_MASK;
	read_ptr = tx_queue->read_count & EFX_TXQ_MASK;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;

	if (unlikely(efx->port_inhibited))
		return NETDEV_TX_BUSY;

	tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		tx_queue += EFX_TXQ_TYPE_OFFLOAD;

	return efx_enqueue_skb(tx_queue, skb);
}

void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;

	EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);

	efx_dequeue_buffers(tx_queue, index);

	/* See if we need to restart the netif queue.  This barrier
	 * separates the update of read_count from the test of
	 * stopped. */
	smp_mb();
	if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
		fill_level = tx_queue->insert_count - tx_queue->read_count;
		if (fill_level < EFX_TXQ_THRESHOLD) {
			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));

			/* Do this under netif_tx_lock(), to avoid racing
			 * with efx_xmit(). */
			netif_tx_lock(efx->net_dev);
			if (tx_queue->stopped) {
				tx_queue->stopped = 0;
				efx_wake_queue(tx_queue->channel);
			}
			netif_tx_unlock(efx->net_dev);
		}
	}
}

int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int txq_size;
	int i, rc;

	netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n",
		  tx_queue->queue);

	/* Allocate software ring */
	txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
	tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;
	for (i = 0; i <= EFX_TXQ_MASK; ++i)
		tx_queue->buffer[i].continuation = true;

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail;

	return 0;

 fail:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	BUG_ON(tx_queue->stopped);

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);
}

void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
	}
}

void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	/* Flush TX queue, remove descriptor ring */
	efx_nic_fini_tx(tx_queue);

	efx_release_tx_buffers(tx_queue);

	/* Free up TSO header cache */
	efx_fini_tso(tx_queue);

	/* Release queue's stop on port, if any */
	if (tx_queue->stopped) {
		tx_queue->stopped = 0;
		efx_wake_queue(tx_queue->channel);
	}
}

void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define TSOH_BUFFER(tsoh)	((u8 *)(tsoh + 1) + TSOH_OFFSET)

/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len)					\
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)

/* Size of blocks on free list.  Larger blocks must be allocated from
 * the heap.
 */
#define TSOH_STD_SIZE		128
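/* Example (illustrative): an option-less TCP/IPv4 header over Ethernet
 * is 14 + 20 + 20 = 54 bytes, so TSOH_SIZE(54) is sizeof(struct
 * efx_tso_header) + TSOH_OFFSET + 54 -- comfortably below
 * TSOH_STD_SIZE on typical builds, so such headers come from the free
 * list rather than the heap.
 */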

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @unmap_single: DMA single vs page mapping flag
 * @protocol: Network protocol (after any VLAN header)
 * @header_len: Number of bytes of header
 * @full_packet_size: Number of bytes to put in each outgoing segment
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	unsigned ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	bool unmap_single;

	__be16 protocol;
	unsigned header_len;
	int full_packet_size;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		/* Find the encapsulated protocol; reset network header
		 * and transport header based on that. */
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
		skb_set_network_header(skb, sizeof(*veh));
		if (protocol == htons(ETH_P_IP))
			skb_set_transport_header(skb, sizeof(*veh) +
						 4 * ip_hdr(skb)->ihl);
		else if (protocol == htons(ETH_P_IPV6))
			skb_set_transport_header(skb, sizeof(*veh) +
						 sizeof(struct ipv6hdr));
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}


/*
 * Allocate a page worth of efx_tso_header structures, and string them
 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
 */
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
	struct efx_tso_header *tsoh;
	dma_addr_t dma_addr;
	u8 *base_kva, *kva;

	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
	if (base_kva == NULL) {
		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
			  "Unable to allocate page for TSO headers\n");
		return -ENOMEM;
	}

	/* pci_alloc_consistent() allocates pages. */
	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));

	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
		tsoh = (struct efx_tso_header *)kva;
		tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
		tsoh->next = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh;
	}

	return 0;
}
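/* With a 4K PAGE_SIZE this carves 4096 / 128 = 32 headers out of each
 * allocated page (illustrative; PAGE_SIZE is architecture-dependent).
 */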


/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
				struct efx_tso_header *tsoh,
				struct pci_dev *pci_dev)
{
	struct efx_tso_header **p;
	unsigned long base_kva;
	dma_addr_t base_dma;

	base_kva = (unsigned long)tsoh & PAGE_MASK;
	base_dma = tsoh->dma_addr & PAGE_MASK;

	p = &tx_queue->tso_headers_free;
	while (*p != NULL) {
		if (((unsigned long)*p & PAGE_MASK) == base_kva)
			*p = (*p)->next;
		else
			p = &(*p)->next;
	}

	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}

static struct efx_tso_header *
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
{
	struct efx_tso_header *tsoh;

	tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
	if (unlikely(!tsoh))
		return NULL;

	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
					TSOH_BUFFER(tsoh), header_len,
					PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
					   tsoh->dma_addr))) {
		kfree(tsoh);
		return NULL;
	}

	tsoh->unmap_len = header_len;
	return tsoh;
}

static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
	pci_unmap_single(tx_queue->efx->pci_dev,
			 tsoh->dma_addr, tsoh->unmap_len,
			 PCI_DMA_TODEVICE);
	kfree(tsoh);
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.  Return 0 on success or 1 if
 * @tx_queue is full.
 */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
			       dma_addr_t dma_addr, unsigned len,
			       struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len, fill_level, insert_ptr;
	int q_space;

	EFX_BUG_ON_PARANOID(len <= 0);

	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	/* -1 as there is no way to represent all descriptors used */
	q_space = EFX_TXQ_MASK - 1 - fill_level;

	while (1) {
		if (unlikely(q_space-- <= 0)) {
			/* It might be that completions have happened
			 * since the xmit path last checked.  Update
			 * the xmit path's copy of read_count.
			 */
			++tx_queue->stopped;
			/* This memory barrier protects the change of
			 * stopped from the access of read_count. */
			smp_mb();
			tx_queue->old_read_count =
				*(volatile unsigned *)&tx_queue->read_count;
			fill_level = (tx_queue->insert_count
				      - tx_queue->old_read_count);
			q_space = EFX_TXQ_MASK - 1 - fill_level;
			if (unlikely(q_space-- <= 0)) {
				*final_buffer = NULL;
				return 1;
			}
			smp_mb();
			--tx_queue->stopped;
		}

		insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[insert_ptr];
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >
				    EFX_TXQ_MASK);

		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->len);
		EFX_BUG_ON_PARANOID(buffer->unmap_len);
		EFX_BUG_ON_PARANOID(buffer->skb);
		EFX_BUG_ON_PARANOID(!buffer->continuation);
		EFX_BUG_ON_PARANOID(buffer->tsoh);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len; /* Don't set the other members */
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
	return 0;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
			       struct efx_tso_header *tsoh, unsigned len)
{
	struct efx_tx_buffer *buffer;

	buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
	efx_tsoh_free(tx_queue, buffer);
	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);
	EFX_BUG_ON_PARANOID(buffer->skb);
	EFX_BUG_ON_PARANOID(!buffer->continuation);
	EFX_BUG_ON_PARANOID(buffer->tsoh);
	buffer->len = len;
	buffer->dma_addr = tsoh->dma_addr;
	buffer->tsoh = tsoh;

	++tx_queue->insert_count;
}


/* Remove descriptors put into a tx_queue. */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	dma_addr_t unmap_addr;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = &tx_queue->buffer[tx_queue->insert_count &
					   EFX_TXQ_MASK];
		efx_tsoh_free(tx_queue, buffer);
		EFX_BUG_ON_PARANOID(buffer->skb);
		if (buffer->unmap_len) {
			unmap_addr = (buffer->dma_addr + buffer->len -
				      buffer->unmap_len);
			if (buffer->unmap_single)
				pci_unmap_single(tx_queue->efx->pci_dev,
						 unmap_addr, buffer->unmap_len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(tx_queue->efx->pci_dev,
					       unmap_addr, buffer->unmap_len,
					       PCI_DMA_TODEVICE);
			buffer->unmap_len = 0;
		}
		buffer->len = 0;
		buffer->continuation = true;
	}
}


/* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
	/* All ethernet/IP/TCP headers combined size is TCP header size
	 * plus offset of TCP header relative to start of packet.
	 */
	st->header_len = ((tcp_hdr(skb)->doff << 2u)
			  + PTR_DIFF(tcp_hdr(skb), skb->data));
	st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;

	if (st->protocol == htons(ETH_P_IP))
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	else
		st->ipv4_id = 0;
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->packet_space = st->full_packet_size;
	st->out_len = skb->len - st->header_len;
	st->unmap_len = 0;
	st->unmap_single = false;
}
 | 888 |  | 
| Ben Hutchings | 4d56606 | 2008-09-01 12:47:12 +0100 | [diff] [blame] | 889 | static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, | 
 | 890 | 			    skb_frag_t *frag) | 
| Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 891 | { | 
| Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 892 | 	st->unmap_addr = pci_map_page(efx->pci_dev, frag->page, | 
 | 893 | 				      frag->page_offset, frag->size, | 
 | 894 | 				      PCI_DMA_TODEVICE); | 
 | 895 | 	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) { | 
 | 896 | 		st->unmap_single = false; | 
 | 897 | 		st->unmap_len = frag->size; | 
 | 898 | 		st->in_len = frag->size; | 
 | 899 | 		st->dma_addr = st->unmap_addr; | 
| Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 900 | 		return 0; | 
 | 901 | 	} | 
 | 902 | 	return -ENOMEM; | 
 | 903 | } | 
 | 904 |  | 
| Ben Hutchings | 4d56606 | 2008-09-01 12:47:12 +0100 | [diff] [blame] | 905 | static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, | 
 | 906 | 				 const struct sk_buff *skb) | 
| Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 907 | { | 
| Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 908 | 	int hl = st->header_len; | 
| Ben Hutchings | ecbd95c | 2008-09-01 12:46:40 +0100 | [diff] [blame] | 909 | 	int len = skb_headlen(skb) - hl; | 
 | 910 |  | 
| Ben Hutchings | 23d9e60 | 2008-09-01 12:47:02 +0100 | [diff] [blame] | 911 | 	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl, | 
 | 912 | 					len, PCI_DMA_TODEVICE); | 
 | 913 | 	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) { | 
 | 914 | 		st->unmap_single = true; | 
 | 915 | 		st->unmap_len = len; | 
 | 916 | 		st->in_len = len; | 
 | 917 | 		st->dma_addr = st->unmap_addr; | 
| Ben Hutchings | b9b39b6 | 2008-05-07 12:51:12 +0100 | [diff] [blame] | 918 | 		return 0; | 
 | 919 | 	} | 
 | 920 | 	return -ENOMEM; | 
 | 921 | } | 
 | 922 |  | 
 | 923 |  | 
 | 924 | /** | 
 | 925 |  * tso_fill_packet_with_fragment - form descriptors for the current fragment | 
 | 926 |  * @tx_queue:		Efx TX queue | 
 | 927 |  * @skb:		Socket buffer | 
 | 928 |  * @st:			TSO state | 
 | 929 |  * | 
 | 930 |  * Form descriptors for the current fragment, until we reach the end | 
 | 931 |  * of fragment or end-of-packet.  Return 0 on success, 1 if not enough | 
 | 932 |  * space in @tx_queue. | 
 | 933 |  */ | 
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					 const struct sk_buff *skb,
					 struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n, end_of_packet, rc;

	if (st->in_len == 0)
		return 0;
	if (st->packet_space == 0)
		return 0;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
	if (likely(rc == 0)) {
		if (st->out_len == 0)
			/* Transfer ownership of the skb */
			buffer->skb = skb;

		end_of_packet = st->out_len == 0 || st->packet_space == 0;
		buffer->continuation = !end_of_packet;

		if (st->in_len == 0) {
			/* Transfer ownership of the PCI mapping */
			buffer->unmap_len = st->unmap_len;
			buffer->unmap_single = st->unmap_single;
			st->unmap_len = 0;
		}
	}

	st->dma_addr += n;
	return rc;
}
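
/*
 * Worked example (illustrative numbers only): with a gso_size of 1460
 * and a 4096-byte fragment, the first call consumes n = 1460 bytes and
 * exhausts packet_space, so end_of_packet is set; the caller starts a
 * new packet and the second call consumes the next 1460 bytes; the
 * third call consumes the remaining 1176 bytes, in_len reaches zero,
 * and ownership of the mapping passes to the descriptor so that it is
 * unmapped on completion.
 */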

/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -1 if we failed to allocate a header buffer.  See the
 * note after this function for how the per-segment fields are fixed up.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tso_header *tsoh;
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	u8 *header;

	/* Allocate a DMA-mapped header buffer. */
	if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
		if (tx_queue->tso_headers_free == NULL) {
			if (efx_tsoh_block_alloc(tx_queue))
				return -1;
		}
		EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
		tsoh = tx_queue->tso_headers_free;
		tx_queue->tso_headers_free = tsoh->next;
		tsoh->unmap_len = 0;
	} else {
		tx_queue->tso_long_headers++;
		tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
		if (unlikely(!tsoh))
			return -1;
	}

	header = TSOH_BUFFER(tsoh);
	tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));

	/* Copy and update the headers. */
	memcpy(header, skb->data, st->header_len);

	tsoh_th->seq = htonl(st->seqnum);
	st->seqnum += skb_shinfo(skb)->gso_size;
	if (st->out_len > skb_shinfo(skb)->gso_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
		tsoh_th->fin = 0;
		tsoh_th->psh = 0;
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
		tsoh_th->fin = tcp_hdr(skb)->fin;
		tsoh_th->psh = tcp_hdr(skb)->psh;
	}

	if (st->protocol == htons(ETH_P_IP)) {
		struct iphdr *tsoh_iph =
			(struct iphdr *)(header + SKB_IPV4_OFF(skb));

		tsoh_iph->tot_len = htons(ip_length);

		/* Linux leaves suitable gaps in the IP ID space
		 * for us to fill. */
		tsoh_iph->id = htons(st->ipv4_id);
		st->ipv4_id++;
	} else {
		struct ipv6hdr *tsoh_iph =
			(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb));

		tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph));
	}

	st->packet_space = skb_shinfo(skb)->gso_size;
	++tx_queue->tso_packets;

	/* Form a descriptor for this header. */
	efx_tso_put_header(tx_queue, tsoh, st->header_len);

	return 0;
}
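
/*
 * For example (illustrative numbers only): a 4380-byte payload with a
 * gso_size of 1460 produces three segments.  Each segment's TCP
 * sequence number advances by 1460, only the final segment keeps the
 * original FIN/PSH flags, and for IPv4 each segment's IP ID is one
 * greater than the last, filling the gap the stack leaves for TSO.
 */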

/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, performing TSO, or return
 * non-zero if @skb could not be enqueued.  In all cases @skb is
 * consumed.  Return %NETDEV_TX_OK or %NETDEV_TX_BUSY.  A sketch of the
 * dispatch into this routine follows the function.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc, rc2 = NETDEV_TX_OK;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	tso_start(&state, skb);

	/* Assume that the skb header area contains exactly the headers, and
	 * all payload is in the frag list.
	 */
	if (skb_headlen(skb) == state.header_len) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		rc = tso_get_head_fragment(&state, efx, skb);
		if (rc)
			goto mem_err;
		frag_i = -1;
	}
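
	/* Note: frag_i == -1 records that the payload currently in
	 * flight came from the linear header area; the ++frag_i in the
	 * loop below then steps to frags[0] once it is exhausted.
	 */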

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

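	/* Main loop (a sketch of the invariant, not new behaviour):
	 * each iteration moves up to min(in_len, packet_space) bytes
	 * into descriptors, refills in_len from the next fragment when
	 * it hits zero, and opens a fresh segment when packet_space
	 * does; error paths aside, it exits once the last fragment is
	 * fully consumed.
	 */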
	while (1) {
		rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
		if (unlikely(rc))
			goto stop;

		/* Move on to the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start a new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or PCI mapping error\n");
	dev_kfree_skb_any(skb);
	goto unwind;

 stop:
	rc2 = NETDEV_TX_BUSY;

	/* Stop the queue if it wasn't stopped before. */
	if (tx_queue->stopped == 1)
		efx_stop_queue(tx_queue->channel);

 unwind:
	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.unmap_single)
			pci_unmap_single(efx->pci_dev, state.unmap_addr,
					 state.unmap_len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(efx->pci_dev, state.unmap_addr,
				       state.unmap_len, PCI_DMA_TODEVICE);
	}

	efx_enqueue_unwind(tx_queue);
	return rc2;
}
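
/*
 * A minimal sketch of how the transmit path might select this routine;
 * the actual dispatch lives in efx_enqueue_skb(), so treat this as
 * illustrative rather than a definitive copy of that code:
 *
 *	if (skb_shinfo(skb)->gso_size)
 *		return efx_enqueue_skb_tso(tx_queue, skb);
 */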

/*
 * Free up all TSO data structures associated with tx_queue.  This
 * routine should be called only when the tx_queue is empty and will
 * no longer be used.
 */
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
	unsigned i;

	if (tx_queue->buffer) {
		for (i = 0; i <= EFX_TXQ_MASK; ++i)
			efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
	}

	while (tx_queue->tso_headers_free != NULL)
		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
				    tx_queue->efx->pci_dev);
}
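
/*
 * Teardown happens in two phases: any header still attached to a ring
 * slot is released via efx_tsoh_free(), and then the blocks backing
 * the preallocated header free list are freed.  Illustrative ordering,
 * assuming a caller on the TX queue teardown path with the queue
 * already quiesced (names taken from this driver, shown as a sketch):
 *
 *	efx_release_tx_buffers(tx_queue);
 *	efx_fini_tso(tx_queue);
 */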