/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(priv,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(priv, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct device *dev = &priv->pci_dev->dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct device *dev = &priv->pci_dev->dev;
	int i;
	bool huge = false;

	if (q->n_bd == 0)
		return;

	for (; q->read_ptr != q->write_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
		/* we have no way to tell if it is a huge cmd ATM */
		i = get_cmd_index(q, q->read_ptr, 0);

		if (txq->meta[i].flags & CMD_SIZE_HUGE) {
			huge = true;
			continue;
		}

		pci_unmap_single(priv->pci_dev,
				 dma_unmap_addr(&txq->meta[i], mapping),
				 dma_unmap_len(&txq->meta[i], len),
				 PCI_DMA_BIDIRECTIONAL);
	}
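	/* The oversized ("huge") command, if one was queued, lives in the
	 * extra buffer slot beyond the normal window (index q->n_window),
	 * so it is unmapped once, outside the loop above. */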
	if (huge) {
		i = q->n_window;
		pci_unmap_single(priv->pci_dev,
				 dma_unmap_addr(&txq->meta[i], mapping),
				 dma_unmap_len(&txq->meta[i], len),
				 PCI_DMA_BIDIRECTIONAL);
	}

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  The driver keeps a minimum of 2
 * empty entries in each circular buffer, to protect against confusing empty
 * and full queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
 * a packet for Tx, the free space drops below the low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'tx done' IRQ), if the free space
 * rises above the high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/

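/*
 * Illustrative sketch (not part of the driver logic): with n_bd = 256,
 * read_ptr = 250 and write_ptr = 3, entries 250..255 and 0..2 are in
 * flight; the driver may keep queuing at 3, 4, ... but always stops
 * while at least 2 entries remain unused, so read_ptr == write_ptr can
 * only ever mean "empty", never "full".
 */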
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);

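/*
 * Usage sketch (assumed, simplified from the Tx-path callers of this
 * era - not part of this file): the marks set up by iwl_queue_init()
 * below pair with iwl_queue_space() above for flow control, roughly:
 *
 *	if (iwl_queue_space(q) < q->high_mark)
 *		iwl_stop_queue(priv, txq);
 *	...
 *	if (iwl_queue_space(q) > q->low_mark)
 *		iwl_wake_queue(priv, txq);
 */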

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct device *dev = &priv->pci_dev->dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != priv->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;
	int actual_slots = slots_num;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4/#9), allocate command space + one big
	 * command for scan, since the scan command is very large; the system
	 * will not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	if (txq_id == priv->cmd_queue)
		actual_slots++;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* only happens for cmd queue */
		if (i == slots_num)
			len = IWL_MAX_CMD_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);

	return 0;
err:
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);

void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			int slots_num, u32 txq_id)
{
	int actual_slots = slots_num;

	if (txq_id == priv->cmd_queue)
		actual_slots++;

	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);

	txq->need_update = 0;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);
}
EXPORT_SYMBOL(iwl_tx_queue_reset);

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in
 * the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len;
	u32 idx;
	u16 fix_size;
	bool is_ct_kill = false;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command, then
	 * we will need to increase the size of the TFD entries.
	 * Also, check that the command buffer does not exceed the size
	 * of device_cmd and max_cmd_size. */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space in command queue\n");
		if (priv->cfg->ops->lib->tt_ops.ct_kill_check) {
			is_ct_kill =
				priv->cfg->ops->lib->tt_ops.ct_kill_check(priv);
		}
		if (!is_ct_kill) {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			queue_work(priv->workqueue, &priv->restart);
		}
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	/* If this is a huge cmd, mark the huge flag also on the meta.flags
	 * of the _original_ cmd. This is used for DMA mapping clean up.
	 */
	if (cmd->flags & CMD_SIZE_HUGE) {
		idx = get_cmd_index(q, q->write_ptr, 0);
		txq->meta[idx].flags = CMD_SIZE_HUGE;
	}

	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	out_meta->flags = cmd->flags;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
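	/* The sequence field packs the command queue number and the write
	 * index together; iwl_tx_cmd_complete() later uses SEQ_TO_QUEUE()
	 * and SEQ_TO_INDEX() on the response to find this entry again. */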
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
			INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	if (idx == TFD_CMD_SLOTS)
		len = IWL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, priv->cmd_queue);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, priv->cmd_queue);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, fix_size);

	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}

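/*
 * Caller-side sketch (assumed, simplified from the command/scan paths of
 * this era - not part of this file): commands arrive here wrapped in a
 * struct iwl_host_cmd; a synchronous sender then sleeps on
 * priv->wait_command_queue until iwl_tx_cmd_complete() below clears
 * STATUS_HCMD_ACTIVE. For example, the oversized scan command is sent
 * roughly as follows (scan_len and scan are placeholder names):
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_SCAN_CMD,
 *		.len = scan_len,
 *		.flags = CMD_SIZE_HUGE,
 *		.data = scan,
 *	};
 *	ret = iwl_send_cmd_sync(priv, &cmd);
 */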
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

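		/* Command queue entries carry no skbs; reclaiming just
		 * advances read_ptr. More than one entry here means the
		 * firmware skipped responding to a command. */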
		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue, then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != priv->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, priv->cmd_queue, sequence,
		  priv->txq[priv->cmd_queue].q.read_ptr,
		  priv->txq[priv->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	/* If this is a huge cmd, clear the huge flag on the meta.flags
	 * of the _original_ cmd, so that iwl_cmd_queue_free won't unmap
	 * the DMA buffer for the scan (huge) command.
	 */
	if (huge) {
		cmd_index = get_cmd_index(&txq->q, index, 0);
		txq->meta[cmd_index].flags = 0;
	}
	cmd_index = get_cmd_index(&txq->q, index, huge);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	pci_unmap_single(priv->pci_dev,
			 dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}
	meta->flags = 0;
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);