/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(priv,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(priv, HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

			/*
			 * else not in power-save mode,
			 * uCode will never sleep when we're
			 * trying to tx (during RFKILL, we're not trying to tx).
			 */
		} else
			iwl_write32(priv, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

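/*
 * Illustrative sketch, not used by the driver: every HBUS_TARG_WRPTR write
 * above is of the form "write_ptr | (txq_id << 8)" -- the TFD index (which
 * is always below TFD_QUEUE_SIZE_MAX and so fits in the low byte) combined
 * with the queue id shifted into the next byte.  A hypothetical helper
 * making that packing explicit:
 */
static inline u32 iwl_txq_wrptr_value(u16 write_ptr, u16 txq_id)
{
	/* e.g. write_ptr = 42, txq_id = 4  ->  0x042a */
	return (u32)write_ptr | ((u32)txq_id << 8);
}
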
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

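/*
 * Worked example for the helpers above (illustrative only): a TB packs a
 * 36-bit DMA address and a 12-bit length into 32 + 16 bits.  For the
 * hypothetical address 0x9_87654321 with len 64:
 *
 *	tb->lo       = 0x87654321	(low 32 address bits)
 *	tb->hi_n_len = (64 << 4) | 0x9	(= 0x0409: length in bits 4-15,
 *					 address bits 32-35 in bits 0-3)
 *
 * iwl_tfd_tb_get_addr() and iwl_tfd_tb_get_len() invert this exactly.
 */
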
static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd)
{
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv: driver private data
 * @txq: tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;
	int index = txq->q.read_ptr;

	iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

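/*
 * Usage sketch (illustrative, not driver code): a multi-chunk TFD is built
 * by chaining calls to iwlagn_txq_attach_buf_to_tfd() -- the first chunk
 * with reset=1 to start a fresh TFD at q.write_ptr, any further chunks
 * with reset=0.  The DMA handle names below are hypothetical.
 */
static inline int iwlagn_example_build_tfd(struct iwl_priv *priv,
					   struct iwl_tx_queue *txq,
					   dma_addr_t txcmd_phys, u16 firstlen,
					   dma_addr_t frag_phys, u16 fraglen)
{
	int ret;

	/* chunk 0: resets (starts) the TFD at q.write_ptr */
	ret = iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys,
					   firstlen, 1);
	if (ret)
		return ret;

	/* chunk 1: appended to the same TFD */
	return iwlagn_txq_attach_buf_to_tfd(priv, txq, frag_phys,
					    fraglen, 0);
}
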
/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * a given Tx queue, and enable the DMA channel used for that queue.
 *
 * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

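/*
 * Note on the ">> 8" above: FH_MEM_CBBC_QUEUE holds the TFD ring base
 * address in 256-byte units, so the ring must be 256-byte aligned in
 * DRAM.  The dma_alloc_coherent() call in iwl_tx_queue_alloc() returns
 * page-aligned memory, which satisfies this.
 */
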
/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (q->n_bd == 0)
		return;

	while (q->write_ptr != q->read_ptr) {
		iwlagn_txq_free_tfd(priv, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct device *dev = &priv->pci_dev->dev;
	int i;

	iwl_tx_queue_unmap(priv, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
 */
void iwl_cmd_queue_unmap(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	int i;

	if (q->n_bd == 0)
		return;

	while (q->read_ptr != q->write_ptr) {
		i = get_cmd_index(q, q->read_ptr);

		if (txq->meta[i].flags & CMD_MAPPED) {
			pci_unmap_single(priv->pci_dev,
					 dma_unmap_addr(&txq->meta[i], mapping),
					 dma_unmap_len(&txq->meta[i], len),
					 PCI_DMA_BIDIRECTIONAL);
			txq->meta[i].flags = 0;
		}

		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct device *dev = &priv->pci_dev->dev;
	int i;

	iwl_cmd_queue_unmap(priv);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}


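/*
 * Worked example for iwl_queue_space() (illustrative): with n_bd = 256,
 * n_window = 64, read_ptr = 250 and write_ptr = 5 (the write pointer has
 * wrapped), s starts as 250 - 5 = 245; read > write, so s -= 256 gives
 * -11; s <= 0, so s += 64 gives 53; the 2-entry reserve leaves 51 free
 * slots reported.
 */
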
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

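/*
 * Example of the watermark math above (illustrative): for a queue with
 * slots_num = 64, low_mark = 64/4 = 16 and high_mark = 64/8 = 8; for a
 * small queue with slots_num = 8, both computed values fall below the
 * floors and are clamped to low_mark = 4 and high_mark = 2.
 */
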
/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct device *dev = &priv->pci_dev->dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != priv->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kzalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < slots_num; i++) {
		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX,
			     slots_num, txq_id);
	if (ret)
		return ret;

	/* Tell device where to find queue */
	iwlagn_tx_queue_init(priv, txq);

	return 0;
err:
	for (i = 0; i < slots_num; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}

void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			int slots_num, u32 txq_id)
{
	int actual_slots = slots_num;

	if (txq_id == priv->cmd_queue)
		actual_slots++;

	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);

	txq->need_update = 0;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwlagn_tx_queue_init(priv, txq);
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in
 * the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);

		IWL_ERR(priv, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv);
		if (!is_ct_kill) {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			iwlagn_fw_error(priv, false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
		return -ENOSPC;
	}

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
					    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, priv->cmd_queue);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   copy_size, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
					   cmd->len[i], PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
			iwlagn_unmap_tfd(priv, out_meta,
					 &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags | CMD_MAPPED;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

 out:
	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}

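/*
 * Usage sketch for iwl_enqueue_hcmd() (illustrative only; callers normally
 * go through the iwl_send_cmd* wrappers, and the command id here is a
 * placeholder): a small payload in slot 0 is copied into the command
 * buffer; a large trailing block could instead set IWL_HCMD_DFL_NOCOPY in
 * dataflags[] to be mapped in place as its own TFD chunk.
 */
static inline int iwl_example_send_cmd(struct iwl_priv *priv,
				       void *payload, u16 payload_len)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_ALIVE,		/* placeholder command id */
		.len = { payload_len, },
		.data = { payload, },
		.flags = CMD_ASYNC,		/* don't block on completion */
	};

	return iwl_enqueue_hcmd(priv, &cmd);
}
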
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
				q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv, false);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue, then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != priv->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, priv->cmd_queue, sequence,
		 priv->txq[priv->cmd_queue].q.read_ptr,
		 priv->txq[priv->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}

	/* Mark as unmapped */
	meta->flags = 0;

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
}
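
/*
 * Worked example of the sequence-field round trip (illustrative): the
 * enqueue path stamps out_cmd->hdr.sequence with
 * QUEUE_TO_SEQ(priv->cmd_queue) | INDEX_TO_SEQ(q->write_ptr); when the
 * uCode echoes that sequence back in its response packet, SEQ_TO_QUEUE()
 * and SEQ_TO_INDEX() above recover the same queue id and TFD index, so
 * iwl_tx_cmd_complete() can find the matching command slot without any
 * extra bookkeeping.
 */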