/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return;
		}

		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);


void iwl_free_tfds_in_queue(struct iwl_priv *priv,
			    int sta_id, int tid, int freed)
{
	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
	else {
		IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
			priv->stations[sta_id].tid[tid].tfds_in_queue,
			freed);
		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
	}
}
EXPORT_SYMBOL(iwl_free_tfds_in_queue);

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct device *dev = &priv->pci_dev->dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct device *dev = &priv->pci_dev->dev;
	int i;
	bool huge = false;

	if (q->n_bd == 0)
		return;

	for (; q->read_ptr != q->write_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
		/* we have no way to tell if it is a huge cmd ATM */
		i = get_cmd_index(q, q->read_ptr, 0);

		if (txq->meta[i].flags & CMD_SIZE_HUGE) {
			huge = true;
			continue;
		}

		pci_unmap_single(priv->pci_dev,
				 pci_unmap_addr(&txq->meta[i], mapping),
				 pci_unmap_len(&txq->meta[i], len),
				 PCI_DMA_BIDIRECTIONAL);
	}
	if (huge) {
		i = q->n_window;
		pci_unmap_single(priv->pci_dev,
				 pci_unmap_addr(&txq->meta[i], mapping),
				 pci_unmap_len(&txq->meta[i], len),
				 PCI_DMA_BIDIRECTIONAL);
	}

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on a 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);
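
/*
 * Illustrative sketch only (not part of this file): how a Tx-path caller
 * might pair iwl_queue_space() with the low/high water marks set up in
 * iwl_queue_init() to flow-control a mac80211 queue, following the
 * "Theory of operation" description above.  The real stop/wake decisions
 * live in the driver's Tx and reclaim paths and may use the marks
 * differently; the helper name below is hypothetical.
 */
#if 0
static void example_txq_flow_control(struct iwl_priv *priv,
				     struct iwl_tx_queue *txq)
{
	struct iwl_queue *q = &txq->q;

	/* after queuing a frame: stop the queue when free space is low */
	if (iwl_queue_space(q) < q->low_mark)
		ieee80211_stop_queue(priv->hw, txq->swq_id);

	/* after reclaiming frames on a 'tx done' IRQ: resume the queue
	 * once enough free space is available again */
	if (iwl_queue_space(q) > q->high_mark)
		ieee80211_wake_queue(priv->hw, txq->swq_id);
}
#endif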


/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;
	q->last_read_ptr = 0;
	q->repeat_same_read_ptr = 0;

	return 0;
}

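/*
 * Why the BUG_ON(!is_power_of_2(...)) checks above: the index-wrap helpers
 * from iwl-helpers.h are expected to wrap with mask arithmetic rather than
 * a modulo.  A minimal sketch of that kind of wrap is shown below for
 * illustration only; the actual helpers are defined elsewhere and may
 * differ in detail.
 */
#if 0
static inline int example_queue_inc_wrap(int index, int n_bd)
{
	/* only correct when n_bd is a power of two */
	return (index + 1) & (n_bd - 1);
}
#endif
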
/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct device *dev = &priv->pci_dev->dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;
	int actual_slots = slots_num;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	if (txq_id == IWL_CMD_QUEUE_NUM)
		actual_slots++;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* only happens for cmd queue */
		if (i == slots_num)
			len = IWL_MAX_CMD_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * Aggregation TX queues will get their ID when aggregation begins;
	 * they overwrite the setting done here. The command FIFO doesn't
	 * need an swq_id so don't set one to catch errors, all others can
	 * be set up to the identity mapping.
	 */
	if (txq_id != IWL_CMD_QUEUE_NUM)
		txq->swq_id = txq_id;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);

	return 0;
err:
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);

void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			int slots_num, u32 txq_id)
{
	int actual_slots = slots_num;

	if (txq_id == IWL_CMD_QUEUE_NUM)
		actual_slots++;

	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);

	txq->need_update = 0;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);
}
EXPORT_SYMBOL(iwl_tx_queue_reset);

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries.
	 * Also, check that the command buffer does not exceed the size
	 * of device_cmd and max_cmd_size. */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space in command queue\n");
		if (iwl_within_ct_kill_margin(priv))
			iwl_tt_enter_ct_kill(priv);
		else {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			queue_work(priv->workqueue, &priv->restart);
		}
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	/* If this is a huge cmd, mark the huge flag also on the meta.flags
	 * of the _original_ cmd. This is used for DMA mapping clean up.
	 */
	if (cmd->flags & CMD_SIZE_HUGE) {
		idx = get_cmd_index(q, q->write_ptr, 0);
		txq->meta[idx].flags = CMD_SIZE_HUGE;
	}

	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	out_meta->flags = cmd->flags;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	if (idx == TFD_CMD_SLOTS)
		len = IWL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, phys_addr);
	pci_unmap_len_set(out_meta, len, fix_size);

	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}

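/*
 * Illustrative sketch only: how a synchronous submitter is typically
 * driven around iwl_enqueue_hcmd().  The real synchronous path lives in
 * iwl-hcmd.c; the function name, timeout value and error handling below
 * are assumptions for illustration.  It relies on iwl_tx_cmd_complete()
 * (further down in this file) clearing STATUS_HCMD_ACTIVE and waking
 * priv->wait_command_queue when the response arrives.
 */
#if 0
static int example_send_cmd_sync(struct iwl_priv *priv,
				 struct iwl_host_cmd *cmd)
{
	int idx;

	set_bit(STATUS_HCMD_ACTIVE, &priv->status);

	idx = iwl_enqueue_hcmd(priv, cmd);
	if (idx < 0) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		return idx;
	}

	/* sleep until the completion path clears STATUS_HCMD_ACTIVE */
	wait_event_interruptible_timeout(priv->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &priv->status), HZ);

	return test_bit(STATUS_HCMD_ACTIVE, &priv->status) ? -ETIMEDOUT : 0;
}
#endif
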
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		  txq_id, sequence,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	/* If this is a huge cmd, clear the huge flag on the meta.flags
	 * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap
	 * the DMA buffer for the scan (huge) command.
	 */
	if (huge) {
		cmd_index = get_cmd_index(&txq->q, index, 0);
		txq->meta[cmd_index].flags = 0;
	}
	cmd_index = get_cmd_index(&txq->q, index, huge);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	pci_unmap_single(priv->pci_dev,
			 pci_unmap_addr(meta, mapping),
			 pci_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}
	meta->flags = 0;
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);

#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(FW_DROP);
	TX_STATUS_FAIL(STA_COLOR_MISMATCH_DROP);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */