/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};
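
/*
 * The array above is indexed by TID.  TIDs 0-7 follow the usual 802.11e
 * user-priority grouping (1/2, 0/3, 4/5 and 6/7 each sharing a Tx FIFO),
 * TIDs 8-15 have no FIFO assigned, and the last entry appears to act as a
 * catch-all.  A lookup is as simple as (see iwl_tx_agg_start() below):
 *
 *	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
 *		tx_fifo = default_tid_to_tx_fifo[tid];
 */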


/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
	struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int counter = 0;
	int index, is_odd;

	/* Host command buffers stay mapped in memory, nothing to clean */
	if (txq->q.id == IWL_CMD_QUEUE_NUM)
		return 0;

	/* Sanity check on number of chunks */
	counter = IWL_GET_BITS(*bd, num_tbs);
	if (counter > MAX_NUM_OF_TBS) {
		IWL_ERROR("Too many chunks: %i\n", counter);
		/* @todo issue fatal error, it is quite a serious situation */
		return 0;
	}

	/* Unmap chunks, if any.
	 * TFD info for odd chunks is in a different format than for even chunks. */
	for (i = 0; i < counter; i++) {
		index = i / 2;
		is_odd = i & 0x1;

		if (is_odd)
			pci_unmap_single(
				dev,
				IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
				(IWL_GET_BITS(bd->pa[index],
					      tb2_addr_hi20) << 16),
				IWL_GET_BITS(bd->pa[index], tb2_len),
				PCI_DMA_TODEVICE);

		else if (i > 0)
			pci_unmap_single(dev,
					 le32_to_cpu(bd->pa[index].tb1_addr),
					 IWL_GET_BITS(bd->pa[index], tb1_len),
					 PCI_DMA_TODEVICE);

		/* Free SKB, if any, for this chunk */
		if (txq->txb[txq->q.read_ptr].skb[i]) {
			struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];

			dev_kfree_skb(skb);
			txq->txb[txq->q.read_ptr].skb[i] = NULL;
		}
	}
	return 0;
}
EXPORT_SYMBOL(iwl_hw_txq_free_tfd);


int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
				 dma_addr_t addr, u16 len)
{
	int index, is_odd;
	struct iwl_tfd_frame *tfd = ptr;
	u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
		IWL_ERROR("Error can not send more than %d chunks\n",
			  MAX_NUM_OF_TBS);
		return -EINVAL;
	}

	index = num_tbs / 2;
	is_odd = num_tbs & 0x1;

	if (!is_odd) {
		tfd->pa[index].tb1_addr = cpu_to_le32(addr);
		IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
			     iwl_get_dma_hi_address(addr));
		IWL_SET_BITS(tfd->pa[index], tb1_len, len);
	} else {
		IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
			     (u32) (addr & 0xffff));
		IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
		IWL_SET_BITS(tfd->pa[index], tb2_len, len);
	}

	IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);

	return 0;
}
EXPORT_SYMBOL(iwl_hw_txq_attach_buf_to_tfd);
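
/*
 * A minimal sketch of typical usage, as in iwl_tx_skb() below: the first
 * call attaches the buffer holding the Tx command plus MAC header, and a
 * second call (when the frame has a payload) attaches the rest of the skb:
 *
 *	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
 *	if (skb->len - hdr_len)
 *		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr,
 *					     skb->len - hdr_len);
 */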

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		/* restore this queue's parameters in nic hardware. */
		ret = iwl_grab_nic_access(priv);
		if (ret)
			return ret;
		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));
		iwl_release_nic_access(priv);

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);


/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int len;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		iwl_hw_txq_free_tfd(priv, txq);

	len = sizeof(struct iwl_cmd) * q->n_window;
	if (q->id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
				    txq->q.n_bd, txq->bd, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits.  If, after queuing
 * the packet for Tx, the free space becomes < low mark, the Tx queue is
 * stopped.  When reclaiming packets (on the 'tx done' IRQ), if the free space
 * becomes > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);
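
/*
 * A worked example of the arithmetic above, assuming a data Tx queue where
 * n_bd == n_window (e.g. 256): with read_ptr = 10 and write_ptr = 250
 * (so 240 TFDs are in flight), s starts at -240, the wrap-around correction
 * brings it to 16 free entries, and the reserve of 2 leaves 14 usable slots.
 * Callers such as iwl_tx_skb() compare this value against q->high_mark to
 * decide when to stop the corresponding mac80211 queue.
 */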


/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
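
/*
 * With slots_num = 256 this gives low_mark = 64 and high_mark = 32; with
 * slots_num = 32 it gives low_mark = 8 and high_mark = 4.  Note that
 * high_mark ends up below low_mark; high_mark is the threshold iwl_tx_skb()
 * compares against iwl_queue_space() when deciding to stop a mac80211 queue.
 */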

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERROR("kmalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else
		txq->txb = NULL;

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->bd = pci_alloc_consistent(dev,
			sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
			&txq->q.dma_addr);

	if (!txq->bd) {
		IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
			  sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
				struct iwl_tx_queue *txq)
{
	int rc;
	unsigned long flags;
	int txq_id = txq->q.id;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	/* Enable DMA channel, using same id as for TFD queue */
	iwl_write_direct32(
		priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
static int iwl_tx_queue_init(struct iwl_priv *priv,
			     struct iwl_tx_queue *txq,
			     int slots_num, u32 txq_id)
{
	struct pci_dev *dev = priv->pci_dev;
	int len;
	int rc = 0;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since the scan command is very large; the system
	 * will not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	len = sizeof(struct iwl_cmd) * slots_num;
	if (txq_id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;
	txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
	if (!txq->cmd)
		return -ENOMEM;

	/* Alloc driver data array and TFD circular buffer */
	rc = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (rc) {
		pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

		return -ENOMEM;
	}
	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwl_hw_tx_queue_init(priv, txq);

	return 0;
}
/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		iwl_tx_queue_free(priv, &priv->txq[txq_id]);

	/* Keep-warm buffer */
	iwl_kw_free(priv);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);


/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initialises them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	iwl_kw_free(priv);

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	/* Alloc keep-warm buffer */
	ret = iwl_kw_alloc(priv);
	if (ret) {
		IWL_ERROR("Keep Warm allocation failed\n");
		goto error_kw;
	}
	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (unlikely(ret)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		goto error_reset;
	}

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Tell nic where to find the keep-warm buffer */
	ret = iwl_kw_init(priv);
	if (ret) {
		IWL_ERROR("kw_init failed\n");
		goto error_reset;
	}

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
				       txq_id);
		if (ret) {
			IWL_ERROR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl_hw_txq_ctx_free(priv);
 error_reset:
	iwl_kw_free(priv);
 error_kw:
	return ret;
}
/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		iwl_write_direct32(priv,
				   FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
				    (txq_id), 200);
	}
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);

/*
 * Build the common part of the REPLY_TX command.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  struct ieee80211_hdr *hdr,
				  int is_unicast, u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
			      struct iwl_tx_cmd *tx_cmd,
			      struct ieee80211_tx_info *info,
			      __le16 fc, int sta_id,
			      int is_hcca)
{
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;
	u8 rate_plcp;
	u16 rate_flags = 0;
	int rate_idx;

	rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
			IWL_RATE_COUNT - 1);

	rate_plcp = iwl_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
	    RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	if (ieee80211_is_probe_resp(fc)) {
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;

	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		/* Alternate between antenna A and B for successive frames */
		if (priv->use_ant_b_for_management_frame) {
			priv->use_ant_b_for_management_frame = 0;
			rate_flags |= RATE_MCS_ANT_B_MSK;
		} else {
			priv->use_ant_b_for_management_frame = 1;
			rate_flags |= RATE_MCS_ANT_A_MSK;
		}
	}

	tx_cmd->rts_retry_limit = rts_retry_limit;
	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX("Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		printk(KERN_ERR "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}

static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
{
	/* 0 - mgmt, 1 - ctl, 2 - data */
	int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	priv->tx_stats[idx].cnt++;
	priv->tx_stats[idx].bytes += len;
}
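
/*
 * The index calculation works because IEEE80211_FCTL_FTYPE is 0x000c and the
 * frame types are mgmt = 0x0, ctl = 0x4, data = 0x8, so shifting the masked
 * value right by two yields 0, 1 or 2 respectively.  For example, a QoS data
 * frame (fc = 0x0088) gives (0x0088 & 0x000c) >> 2 == 2.
 */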

/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tfd_frame *tfd;
	u32 *control_flags;
	int txq_id = skb_get_queue_mapping(skb);
	struct iwl_tx_queue *txq = NULL;
	struct iwl_queue *q = NULL;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	struct iwl_cmd *out_cmd = NULL;
	struct iwl_tx_cmd *tx_cmd;
	u16 len, idx, len_org;
	u16 seq_number = 0;
	u8 id, hdr_len, unicast;
	u8 sta_id;
	__le16 fc;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP("Dropping - RF KILL\n");
		goto drop_unlock;
	}

	if (!priv->vif) {
		IWL_DEBUG_DROP("Dropping - !priv->vif\n");
		goto drop_unlock;
	}

	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
	     IWL_INVALID_RATE) {
		IWL_ERROR("ERROR: No TX rate available.\n");
		goto drop_unlock;
	}

	unicast = !is_multicast_ether_addr(hdr->addr1);
	id = 0;

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX("Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX("Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX("Sending REASSOC frame\n");
#endif

	/* drop all data frames if we are not associated */
	if (ieee80211_is_data(fc) &&
	   (!iwl_is_associated(priv) ||
	    ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
	    !priv->assoc_station_added)) {
		IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	hdr_len = ieee80211_get_hdrlen(le16_to_cpu(fc));

	/* Find (or create) index into station table for destination station */
	sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		DECLARE_MAC_BUF(mac);

		IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
			       print_mac(mac, hdr->addr1));
		goto drop;
	}

	IWL_DEBUG_TX("station Id %d\n", sta_id);

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
		seq_number = priv->stations[sta_id].tid[tid].seq_number &
				IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = cpu_to_le16(seq_number) |
			(hdr->seq_ctrl &
				__constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
	}

	/* Descriptor for chosen Tx queue */
	txq = &priv->txq[txq_id];
	q = &txq->q;

	spin_lock_irqsave(&priv->lock, flags);

	/* Set up first empty TFD within this queue's circular TFD buffer */
	tfd = &txq->bd[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));
	control_flags = (u32 *) tfd;
	idx = get_cmd_index(q, q->write_ptr, 0);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = &txq->cmd[idx];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
		     offsetof(struct iwl_cmd, hdr);

	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);

	if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
	}

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);
	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, unicast, sta_id);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);

	iwl_update_tx_stats(priv, le16_to_cpu(fc), len);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		offsetof(struct iwl_tx_cmd, scratch);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	if ((iwl_queue_space(q) < q->high_mark)
	    && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		}

		ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
drop:
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);
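
/*
 * Note how the sequence field written above round-trips: iwl_tx_skb() and
 * iwl_enqueue_hcmd() pack the queue id and TFD index with QUEUE_TO_SEQ() and
 * INDEX_TO_SEQ(), the uCode echoes that value back in its response, and the
 * completion path (e.g. iwl_tx_cmd_complete() below for the command queue)
 * recovers them with SEQ_TO_QUEUE() and SEQ_TO_INDEX() to locate the entry.
 */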

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_tfd_frame *tfd;
	u32 *control_flags;
	struct iwl_cmd *out_cmd;
	u32 idx;
	u16 fix_size;
	dma_addr_t phys_addr;
	int ret;
	unsigned long flags;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO("Not sending command - RF KILL");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERROR("No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	tfd = &txq->bd[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	control_flags = (u32 *) tfd;

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = &txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);

	phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
			offsetof(struct iwl_cmd, hdr);
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     get_cmd_string(out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);

	txq->need_update = 1;

	/* Set up entry in queue's byte count circular buffer */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}
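
/*
 * A minimal sketch of a caller, using only the fields referenced above
 * (the command id and payload are placeholders):
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = <uCode command id>,
 *		.len = <payload length>,
 *		.data = <payload buffer>,
 *	};
 *	int idx = iwl_enqueue_hcmd(priv, &cmd);
 *	if (idx < 0)
 *		return idx;	// no queue space, RF-kill, etc.
 */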

int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		iwl_hw_txq_free_tfd(priv, txq);
		nfreed++;
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);


/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed > 1) {
			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
					q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}
		nfreed++;
	}
}

 | 1139 | /** | 
 | 1140 |  * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them | 
 | 1141 |  * @rxb: Rx buffer to reclaim | 
 | 1142 |  * | 
 | 1143 |  * If an Rx buffer has an async callback associated with it the callback | 
 | 1144 |  * will be executed.  The attached skb (if present) will only be freed | 
 | 1145 |  * if the callback returns 1 | 
 | 1146 |  */ | 
 | 1147 | void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | 
 | 1148 | { | 
 | 1149 | 	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; | 
 | 1150 | 	u16 sequence = le16_to_cpu(pkt->hdr.sequence); | 
 | 1151 | 	int txq_id = SEQ_TO_QUEUE(sequence); | 
 | 1152 | 	int index = SEQ_TO_INDEX(sequence); | 
 | 1153 | 	int huge = sequence & SEQ_HUGE_FRAME; | 
 | 1154 | 	int cmd_index; | 
 | 1155 | 	struct iwl_cmd *cmd; | 
 | 1156 |  | 
 | 1157 | 	/* If a Tx command is being handled and it isn't in the actual | 
 | 1158 | 	 * command queue then there a command routing bug has been introduced | 
 | 1159 | 	 * in the queue management code. */ | 
 | 1160 | 	if (txq_id != IWL_CMD_QUEUE_NUM) | 
 | 1161 | 		IWL_ERROR("Error wrong command queue %d command id 0x%X\n", | 
 | 1162 | 			  txq_id, pkt->hdr.cmd); | 
 | 1163 | 	BUG_ON(txq_id != IWL_CMD_QUEUE_NUM); | 
 | 1164 |  | 
 | 1165 | 	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); | 
 | 1166 | 	cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; | 
 | 1167 |  | 
 | 1168 | 	/* Input error checking is done when commands are added to queue. */ | 
 | 1169 | 	if (cmd->meta.flags & CMD_WANT_SKB) { | 
 | 1170 | 		cmd->meta.source->u.skb = rxb->skb; | 
 | 1171 | 		rxb->skb = NULL; | 
 | 1172 | 	} else if (cmd->meta.u.callback && | 
 | 1173 | 		   !cmd->meta.u.callback(priv, cmd, rxb->skb)) | 
 | 1174 | 		rxb->skb = NULL; | 
 | 1175 |  | 
 | 1176 | 	iwl_hcmd_queue_reclaim(priv, txq_id, index); | 
 | 1177 |  | 
 | 1178 | 	if (!(cmd->meta.flags & CMD_ASYNC)) { | 
 | 1179 | 		clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | 
 | 1180 | 		wake_up_interruptible(&priv->wait_command_queue); | 
 | 1181 | 	} | 
 | 1182 | } | 
 | 1183 | EXPORT_SYMBOL(iwl_tx_cmd_complete); | 
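
/*
 * Illustrative note (the dispatch itself lives in the Rx handling code,
 * not in this file): packets whose sequence field does not carry
 * SEQ_RX_FRAME are command responses, and the Rx path is expected to
 * route them to iwl_tx_cmd_complete() so the matching entry in the
 * command queue can be reclaimed.
 */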
 | 1184 |  | 
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
 | 1191 | static int iwl_txq_ctx_activate_free(struct iwl_priv *priv) | 
 | 1192 | { | 
 | 1193 | 	int txq_id; | 
 | 1194 |  | 
 | 1195 | 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) | 
 | 1196 | 		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) | 
 | 1197 | 			return txq_id; | 
 | 1198 | 	return -1; | 
 | 1199 | } | 
 | 1200 |  | 
 | 1201 | int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn) | 
 | 1202 | { | 
 | 1203 | 	int sta_id; | 
 | 1204 | 	int tx_fifo; | 
 | 1205 | 	int txq_id; | 
 | 1206 | 	int ret; | 
 | 1207 | 	unsigned long flags; | 
 | 1208 | 	struct iwl_tid_data *tid_data; | 
 | 1209 | 	DECLARE_MAC_BUF(mac); | 
 | 1210 |  | 
 | 1211 | 	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) | 
 | 1212 | 		tx_fifo = default_tid_to_tx_fifo[tid]; | 
 | 1213 | 	else | 
 | 1214 | 		return -EINVAL; | 
 | 1215 |  | 
 | 1216 | 	IWL_WARNING("%s on ra = %s tid = %d\n", | 
 | 1217 | 			__func__, print_mac(mac, ra), tid); | 
 | 1218 |  | 
 | 1219 | 	sta_id = iwl_find_station(priv, ra); | 
 | 1220 | 	if (sta_id == IWL_INVALID_STATION) | 
 | 1221 | 		return -ENXIO; | 
 | 1222 |  | 
 | 1223 | 	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { | 
 | 1224 | 		IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n"); | 
 | 1225 | 		return -ENXIO; | 
 | 1226 | 	} | 
 | 1227 |  | 
 | 1228 | 	txq_id = iwl_txq_ctx_activate_free(priv); | 
 | 1229 | 	if (txq_id == -1) | 
 | 1230 | 		return -ENXIO; | 
 | 1231 |  | 
 | 1232 | 	spin_lock_irqsave(&priv->sta_lock, flags); | 
 | 1233 | 	tid_data = &priv->stations[sta_id].tid[tid]; | 
 | 1234 | 	*ssn = SEQ_TO_SN(tid_data->seq_number); | 
 | 1235 | 	tid_data->agg.txq_id = txq_id; | 
 | 1236 | 	spin_unlock_irqrestore(&priv->sta_lock, flags); | 
 | 1237 |  | 
 | 1238 | 	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo, | 
 | 1239 | 						  sta_id, tid, *ssn); | 
 | 1240 | 	if (ret) | 
 | 1241 | 		return ret; | 
 | 1242 |  | 
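	/* If the HW queue for this TID has already drained, aggregation can
	 * be turned on immediately and mac80211 is notified; otherwise stay
	 * in IWL_EMPTYING_HW_QUEUE_ADDBA until iwl_txq_check_empty() sees
	 * the queue empty and completes the ADDBA flow. */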
 | 1243 | 	if (tid_data->tfds_in_queue == 0) { | 
		IWL_DEBUG_HT("HW queue is empty\n");
 | 1245 | 		tid_data->agg.state = IWL_AGG_ON; | 
 | 1246 | 		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid); | 
 | 1247 | 	} else { | 
 | 1248 | 		IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n", | 
 | 1249 | 			     tid_data->tfds_in_queue); | 
 | 1250 | 		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; | 
 | 1251 | 	} | 
 | 1252 | 	return ret; | 
 | 1253 | } | 
 | 1254 | EXPORT_SYMBOL(iwl_tx_agg_start); | 
 | 1255 |  | 
int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
 | 1257 | { | 
 | 1258 | 	int tx_fifo_id, txq_id, sta_id, ssn = -1; | 
 | 1259 | 	struct iwl_tid_data *tid_data; | 
 | 1260 | 	int ret, write_ptr, read_ptr; | 
 | 1261 | 	unsigned long flags; | 
 | 1262 | 	DECLARE_MAC_BUF(mac); | 
 | 1263 |  | 
 | 1264 | 	if (!ra) { | 
 | 1265 | 		IWL_ERROR("ra = NULL\n"); | 
 | 1266 | 		return -EINVAL; | 
 | 1267 | 	} | 
 | 1268 |  | 
 | 1269 | 	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) | 
 | 1270 | 		tx_fifo_id = default_tid_to_tx_fifo[tid]; | 
 | 1271 | 	else | 
 | 1272 | 		return -EINVAL; | 
 | 1273 |  | 
 | 1274 | 	sta_id = iwl_find_station(priv, ra); | 
 | 1275 |  | 
 | 1276 | 	if (sta_id == IWL_INVALID_STATION) | 
 | 1277 | 		return -ENXIO; | 
 | 1278 |  | 
 | 1279 | 	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) | 
 | 1280 | 		IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n"); | 
 | 1281 |  | 
 | 1282 | 	tid_data = &priv->stations[sta_id].tid[tid]; | 
 | 1283 | 	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; | 
 | 1284 | 	txq_id = tid_data->agg.txq_id; | 
 | 1285 | 	write_ptr = priv->txq[txq_id].q.write_ptr; | 
 | 1286 | 	read_ptr = priv->txq[txq_id].q.read_ptr; | 
 | 1287 |  | 
	/* The queue is not empty: defer the teardown; iwl_txq_check_empty()
	 * will complete the DELBA flow once the last frame is reclaimed. */
 | 1289 | 	if (write_ptr != read_ptr) { | 
 | 1290 | 		IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n"); | 
 | 1291 | 		priv->stations[sta_id].tid[tid].agg.state = | 
 | 1292 | 				IWL_EMPTYING_HW_QUEUE_DELBA; | 
 | 1293 | 		return 0; | 
 | 1294 | 	} | 
 | 1295 |  | 
 | 1296 | 	IWL_DEBUG_HT("HW queue is empty\n"); | 
 | 1297 | 	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; | 
 | 1298 |  | 
 | 1299 | 	spin_lock_irqsave(&priv->lock, flags); | 
 | 1300 | 	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn, | 
 | 1301 | 						   tx_fifo_id); | 
 | 1302 | 	spin_unlock_irqrestore(&priv->lock, flags); | 
 | 1303 |  | 
 | 1304 | 	if (ret) | 
 | 1305 | 		return ret; | 
 | 1306 |  | 
 | 1307 | 	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid); | 
 | 1308 |  | 
 | 1309 | 	return 0; | 
 | 1310 | } | 
 | 1311 | EXPORT_SYMBOL(iwl_tx_agg_stop); | 
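
/*
 * Illustrative usage (not part of this file): the per-device mac80211
 * ampdu_action callback is expected to route A-MPDU session requests to
 * the two helpers above, roughly
 *
 *	case IEEE80211_AMPDU_TX_START:
 *		return iwl_tx_agg_start(priv, addr, tid, ssn);
 *	case IEEE80211_AMPDU_TX_STOP:
 *		return iwl_tx_agg_stop(priv, addr, tid);
 *
 * where 'addr' is the receiver address the block-ack session is being
 * set up or torn down with.
 */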
 | 1312 |  | 
 | 1313 | int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id) | 
 | 1314 | { | 
 | 1315 | 	struct iwl_queue *q = &priv->txq[txq_id].q; | 
 | 1316 | 	u8 *addr = priv->stations[sta_id].sta.sta.addr; | 
 | 1317 | 	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; | 
 | 1318 |  | 
 | 1319 | 	switch (priv->stations[sta_id].tid[tid].agg.state) { | 
 | 1320 | 	case IWL_EMPTYING_HW_QUEUE_DELBA: | 
		/* We are reclaiming the last packet of the
		 * aggregated HW queue */
 | 1323 | 		if (txq_id  == tid_data->agg.txq_id && | 
 | 1324 | 		    q->read_ptr == q->write_ptr) { | 
 | 1325 | 			u16 ssn = SEQ_TO_SN(tid_data->seq_number); | 
 | 1326 | 			int tx_fifo = default_tid_to_tx_fifo[tid]; | 
 | 1327 | 			IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n"); | 
 | 1328 | 			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, | 
 | 1329 | 							     ssn, tx_fifo); | 
 | 1330 | 			tid_data->agg.state = IWL_AGG_OFF; | 
 | 1331 | 			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid); | 
 | 1332 | 		} | 
 | 1333 | 		break; | 
 | 1334 | 	case IWL_EMPTYING_HW_QUEUE_ADDBA: | 
 | 1335 | 		/* We are reclaiming the last packet of the queue */ | 
 | 1336 | 		if (tid_data->tfds_in_queue == 0) { | 
 | 1337 | 			IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n"); | 
 | 1338 | 			tid_data->agg.state = IWL_AGG_ON; | 
 | 1339 | 			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid); | 
 | 1340 | 		} | 
 | 1341 | 		break; | 
 | 1342 | 	} | 
 | 1343 | 	return 0; | 
 | 1344 | } | 
 | 1345 | EXPORT_SYMBOL(iwl_txq_check_empty); | 
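
/*
 * Note: this is expected to be called from the Tx-response and
 * block-ack reclaim paths (see iwl_rx_reply_compressed_ba() below)
 * after TFDs have been freed, so a pending ADDBA or DELBA flow can
 * finish once its HW queue drains.
 */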

/**
 | 1348 |  * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack | 
 | 1349 |  * | 
 | 1350 |  * Go through block-ack's bitmap of ACK'd frames, update driver's record of | 
 | 1351 |  * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo. | 
 | 1352 |  */ | 
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
					     struct iwl_ht_agg *agg,
					     struct iwl_compressed_ba_resp *ba_resp)
{
 | 1358 | 	int i, sh, ack; | 
 | 1359 | 	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); | 
 | 1360 | 	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | 
 | 1361 | 	u64 bitmap; | 
 | 1362 | 	int successes = 0; | 
 | 1363 | 	struct ieee80211_tx_info *info; | 
 | 1364 |  | 
 | 1365 | 	if (unlikely(!agg->wait_for_ba))  { | 
 | 1366 | 		IWL_ERROR("Received BA when not expected\n"); | 
 | 1367 | 		return -EINVAL; | 
 | 1368 | 	} | 
 | 1369 |  | 
 | 1370 | 	/* Mark that the expected block-ack response arrived */ | 
 | 1371 | 	agg->wait_for_ba = 0; | 
	IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, seq_ctl);
 | 1373 |  | 
 | 1374 | 	/* Calculate shift to align block-ack bits with our Tx window bits */ | 
 | 1375 | 	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4); | 
	if (sh < 0) /* something is wrong with indices */
 | 1377 | 		sh += 0x100; | 
 | 1378 |  | 
	/* Align the block-ack bitmap with the start of our Tx window */
 | 1380 | 	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; | 
 | 1381 |  | 
 | 1382 | 	if (agg->frame_count > (64 - sh)) { | 
		IWL_DEBUG_TX_REPLY("more frames than bitmap size\n");
 | 1384 | 		return -1; | 
 | 1385 | 	} | 
 | 1386 |  | 
 | 1387 | 	/* check for success or failure according to the | 
 | 1388 | 	 * transmitted bitmap and block-ack bitmap */ | 
 | 1389 | 	bitmap &= agg->bitmap; | 
 | 1390 |  | 
 | 1391 | 	/* For each frame attempted in aggregation, | 
 | 1392 | 	 * update driver's record of tx frame's status. */ | 
	for (i = 0; i < agg->frame_count; i++) {
		ack = !!(bitmap & (1ULL << i));
		successes += ack;
		IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}
 | 1400 |  | 
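	/* Report the aggregate outcome on the first frame of the window;
	 * mac80211 and the rate scaling code read ampdu_ack_len/ampdu_ack_map
	 * to judge how the whole A-MPDU fared. */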
 | 1401 | 	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]); | 
 | 1402 | 	memset(&info->status, 0, sizeof(info->status)); | 
 | 1403 | 	info->flags = IEEE80211_TX_STAT_ACK; | 
 | 1404 | 	info->flags |= IEEE80211_TX_STAT_AMPDU; | 
 | 1405 | 	info->status.ampdu_ack_map = successes; | 
 | 1406 | 	info->status.ampdu_ack_len = agg->frame_count; | 
 | 1407 | 	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info); | 
 | 1408 |  | 
 | 1409 | 	IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap); | 
 | 1410 |  | 
 | 1411 | 	return 0; | 
 | 1412 | } | 
 | 1413 |  | 
 | 1414 | /** | 
 | 1415 |  * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA | 
 | 1416 |  * | 
 | 1417 |  * Handles block-acknowledge notification from device, which reports success | 
 | 1418 |  * of frames sent via aggregation. | 
 | 1419 |  */ | 
 | 1420 | void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, | 
 | 1421 | 					   struct iwl_rx_mem_buffer *rxb) | 
 | 1422 | { | 
 | 1423 | 	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; | 
 | 1424 | 	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; | 
 | 1425 | 	int index; | 
 | 1426 | 	struct iwl_tx_queue *txq = NULL; | 
 | 1427 | 	struct iwl_ht_agg *agg; | 
 | 1428 | 	DECLARE_MAC_BUF(mac); | 
 | 1429 |  | 
 | 1430 | 	/* "flow" corresponds to Tx queue */ | 
 | 1431 | 	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | 
 | 1432 |  | 
 | 1433 | 	/* "ssn" is start of block-ack Tx window, corresponds to index | 
 | 1434 | 	 * (in Tx queue's circular buffer) of first TFD/frame in window */ | 
 | 1435 | 	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); | 
 | 1436 |  | 
 | 1437 | 	if (scd_flow >= priv->hw_params.max_txq_num) { | 
		IWL_ERROR("scd_flow %d is out of range (max %d)\n",
			  scd_flow, priv->hw_params.max_txq_num);
 | 1439 | 		return; | 
 | 1440 | 	} | 
 | 1441 |  | 
 | 1442 | 	txq = &priv->txq[scd_flow]; | 
 | 1443 | 	agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg; | 
 | 1444 |  | 
 | 1445 | 	/* Find index just before block-ack window */ | 
 | 1446 | 	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); | 
 | 1447 |  | 
 | 1448 | 	/* TODO: Need to get this copy more safely - now good for debug */ | 
 | 1449 |  | 
 | 1450 | 	IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, " | 
 | 1451 | 			   "sta_id = %d\n", | 
 | 1452 | 			   agg->wait_for_ba, | 
 | 1453 | 			   print_mac(mac, (u8 *) &ba_resp->sta_addr_lo32), | 
 | 1454 | 			   ba_resp->sta_id); | 
 | 1455 | 	IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = " | 
 | 1456 | 			   "%d, scd_ssn = %d\n", | 
 | 1457 | 			   ba_resp->tid, | 
 | 1458 | 			   ba_resp->seq_ctl, | 
 | 1459 | 			   (unsigned long long)le64_to_cpu(ba_resp->bitmap), | 
 | 1460 | 			   ba_resp->scd_flow, | 
 | 1461 | 			   ba_resp->scd_ssn); | 
 | 1462 | 	IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n", | 
 | 1463 | 			   agg->start_idx, | 
 | 1464 | 			   (unsigned long long)agg->bitmap); | 
 | 1465 |  | 
 | 1466 | 	/* Update driver's record of ACK vs. not for each frame in window */ | 
 | 1467 | 	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp); | 
 | 1468 |  | 
 | 1469 | 	/* Release all TFDs before the SSN, i.e. all TFDs in front of | 
 | 1470 | 	 * block-ack window (we assume that they've been successfully | 
 | 1471 | 	 * transmitted ... if not, it's too late anyway). */ | 
 | 1472 | 	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { | 
 | 1473 | 		/* calculate mac80211 ampdu sw queue to wake */ | 
 | 1474 | 		int ampdu_q = | 
 | 1475 | 		   scd_flow - priv->hw_params.first_ampdu_q + priv->hw->queues; | 
 | 1476 | 		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); | 
 | 1477 | 		priv->stations[ba_resp->sta_id]. | 
 | 1478 | 			tid[ba_resp->tid].tfds_in_queue -= freed; | 
 | 1479 | 		if (iwl_queue_space(&txq->q) > txq->q.low_mark && | 
 | 1480 | 			priv->mac80211_registered && | 
 | 1481 | 			agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) | 
 | 1482 | 			ieee80211_wake_queue(priv->hw, ampdu_q); | 
 | 1483 |  | 
 | 1484 | 		iwl_txq_check_empty(priv, ba_resp->sta_id, | 
 | 1485 | 				    ba_resp->tid, scd_flow); | 
 | 1486 | 	} | 
 | 1487 | } | 
 | 1488 | EXPORT_SYMBOL(iwl_rx_reply_compressed_ba); | 
 | 1489 |  | 
#ifdef CONFIG_IWLWIFI_DEBUG
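/* Expands to "case TX_STATUS_FAIL_<x>: return "<x>";" so each failure
 * status below maps directly to a human-readable string. */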
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
 | 1492 |  | 
 | 1493 | const char *iwl_get_tx_fail_reason(u32 status) | 
 | 1494 | { | 
 | 1495 | 	switch (status & TX_STATUS_MSK) { | 
 | 1496 | 	case TX_STATUS_SUCCESS: | 
 | 1497 | 		return "SUCCESS"; | 
 | 1498 | 		TX_STATUS_ENTRY(SHORT_LIMIT); | 
 | 1499 | 		TX_STATUS_ENTRY(LONG_LIMIT); | 
 | 1500 | 		TX_STATUS_ENTRY(FIFO_UNDERRUN); | 
 | 1501 | 		TX_STATUS_ENTRY(MGMNT_ABORT); | 
 | 1502 | 		TX_STATUS_ENTRY(NEXT_FRAG); | 
 | 1503 | 		TX_STATUS_ENTRY(LIFE_EXPIRE); | 
 | 1504 | 		TX_STATUS_ENTRY(DEST_PS); | 
 | 1505 | 		TX_STATUS_ENTRY(ABORTED); | 
 | 1506 | 		TX_STATUS_ENTRY(BT_RETRY); | 
 | 1507 | 		TX_STATUS_ENTRY(STA_INVALID); | 
 | 1508 | 		TX_STATUS_ENTRY(FRAG_DROPPED); | 
 | 1509 | 		TX_STATUS_ENTRY(TID_DISABLE); | 
 | 1510 | 		TX_STATUS_ENTRY(FRAME_FLUSHED); | 
 | 1511 | 		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); | 
 | 1512 | 		TX_STATUS_ENTRY(TX_LOCKED); | 
 | 1513 | 		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); | 
 | 1514 | 	} | 
 | 1515 |  | 
 | 1516 | 	return "UNKNOWN"; | 
 | 1517 | } | 
 | 1518 | EXPORT_SYMBOL(iwl_get_tx_fail_reason); | 
 | 1519 | #endif /* CONFIG_IWLWIFI_DEBUG */ |