/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        u32 reg = 0;
        int txq_id = txq->q.id;

        if (txq->need_update == 0)
                return;

        if (priv->cfg->base_params->shadow_reg_enable) {
                /* shadow register enabled */
                iwl_write32(priv, HBUS_TARG_WRPTR,
                            txq->q.write_ptr | (txq_id << 8));
        } else {
                /* if we're trying to save power */
                if (test_bit(STATUS_POWER_PMI, &priv->status)) {
                        /* wake up nic if it's powered down ...
                         * uCode will wake up, and interrupt us again, so next
                         * time we'll skip this part. */
                        reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

                        if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                                IWL_DEBUG_INFO(priv,
                                        "Tx queue %d requesting wakeup,"
                                        " GP1 = 0x%x\n", txq_id, reg);
                                iwl_set_bit(priv, CSR_GP_CNTRL,
                                        CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                                return;
                        }

                        iwl_write_direct32(priv, HBUS_TARG_WRPTR,
                                        txq->q.write_ptr | (txq_id << 8));

                        /*
                         * else not in power-save mode,
                         * uCode will never sleep when we're
                         * trying to tx (during RFKILL, we're not trying to tx).
                         */
                } else
                        iwl_write32(priv, HBUS_TARG_WRPTR,
                                    txq->q.write_ptr | (txq_id << 8));
        }
        txq->need_update = 0;
}

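/*
 * Worked example (illustrative, not part of the original source): the value
 * written to HBUS_TARG_WRPTR above packs the TFD write index into the low
 * byte and the queue number into bits 8 and up, so for queue 4 with write
 * index 12 the register value is 12 | (4 << 8) == 0x040c.  This is how the
 * hardware knows which queue's write pointer is being advanced.
 */
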
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];

        dma_addr_t addr = get_unaligned_le32(&tb->lo);
        if (sizeof(dma_addr_t) > sizeof(u32))
                addr |=
                ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

        return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];

        return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
                                  dma_addr_t addr, u16 len)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];
        u16 hi_n_len = len << 4;

        put_unaligned_le32(addr, &tb->lo);
        if (sizeof(dma_addr_t) > sizeof(u32))
                hi_n_len |= ((addr >> 16) >> 16) & 0xF;

        tb->hi_n_len = cpu_to_le16(hi_n_len);

        tfd->num_tbs = idx + 1;
}

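/*
 * Worked example (illustrative) of the packing done by iwl_tfd_set_tb()
 * above: a buffer descriptor holds a 36-bit DMA address split across
 * 'lo' (address bits 0-31) and the low nibble of 'hi_n_len' (address
 * bits 32-35), with the 12-bit length in the upper bits of 'hi_n_len'.
 * For addr = 0x812345678 and len = 0x40:
 *
 *      tb->lo       = 0x12345678
 *      tb->hi_n_len = (0x40 << 4) | 0x8 = 0x0408
 *
 * iwl_tfd_tb_get_addr() and iwl_tfd_tb_get_len() invert this packing.
 */
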
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
        return tfd->num_tbs & 0x1f;
}

static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
                             struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
        int i;
        int num_tbs;

        /* Sanity check on number of chunks */
        num_tbs = iwl_tfd_get_num_tbs(tfd);

        if (num_tbs >= IWL_NUM_OF_TBS) {
                IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
                /* @todo issue fatal error, this is quite a serious situation */
                return;
        }

        /* Unmap tx_cmd */
        if (num_tbs)
                dma_unmap_single(priv->bus.dev,
                                dma_unmap_addr(meta, mapping),
                                dma_unmap_len(meta, len),
                                DMA_BIDIRECTIONAL);

        /* Unmap chunks, if any. */
        for (i = 1; i < num_tbs; i++)
                dma_unmap_single(priv->bus.dev, iwl_tfd_tb_get_addr(tfd, i),
                                iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

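/*
 * Note the asymmetry above: the first TB points at the command/Tx header
 * ("tx_cmd") whose DMA mapping is tracked in the iwl_cmd_meta, so it is
 * unmapped via the dma_unmap_addr()/dma_unmap_len() pair; the remaining
 * TBs carry their addresses and lengths in the TFD itself and are
 * unmapped straight from there.
 */
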
/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv: driver private data
 * @txq: tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        struct iwl_tfd *tfd_tmp = txq->tfds;
        int index = txq->q.read_ptr;

        iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
                         DMA_TO_DEVICE);

        /* free SKB */
        if (txq->txb) {
                struct sk_buff *skb;

                skb = txq->txb[txq->q.read_ptr].skb;

                /* can be called from irqs-disabled context */
                if (skb) {
                        dev_kfree_skb_any(skb);
                        txq->txb[txq->q.read_ptr].skb = NULL;
                }
        }
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
                                 struct iwl_tx_queue *txq,
                                 dma_addr_t addr, u16 len,
                                 u8 reset)
{
        struct iwl_queue *q;
        struct iwl_tfd *tfd, *tfd_tmp;
        u32 num_tbs;

        q = &txq->q;
        tfd_tmp = txq->tfds;
        tfd = &tfd_tmp[q->write_ptr];

        if (reset)
                memset(tfd, 0, sizeof(*tfd));

        num_tbs = iwl_tfd_get_num_tbs(tfd);

        /* Each TFD can point to a maximum of 20 Tx buffers */
        if (num_tbs >= IWL_NUM_OF_TBS) {
                IWL_ERR(priv, "Error can not send more than %d chunks\n",
                        IWL_NUM_OF_TBS);
                return -EINVAL;
        }

        if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
                return -EINVAL;

        if (unlikely(addr & ~IWL_TX_DMA_MASK))
                IWL_ERR(priv, "Unaligned address = %llx\n",
                        (unsigned long long)addr);

        iwl_tfd_set_tb(tfd, num_tbs, addr, len);

        return 0;
}

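/*
 * Typical usage, as in iwl_enqueue_hcmd() below: the first chunk of a
 * frame is attached with reset set to 1 so the TFD starts out zeroed,
 * and any further chunks are attached with reset set to 0 so that they
 * accumulate in the same descriptor.
 */
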
/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;

        if (q->n_bd == 0)
                return;

        while (q->write_ptr != q->read_ptr) {
                iwlagn_txq_free_tfd(priv, txq);
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct device *dev = priv->bus.dev;
        int i;

        iwl_tx_queue_unmap(priv, txq_id);

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                dma_free_coherent(dev, priv->hw_params.tfd_size *
                                  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

        /* De-alloc array of per-TFD driver data */
        kfree(txq->txb);
        txq->txb = NULL;

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
 */
void iwl_cmd_queue_unmap(struct iwl_priv *priv)
{
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct iwl_queue *q = &txq->q;
        int i;

        if (q->n_bd == 0)
                return;

        while (q->read_ptr != q->write_ptr) {
                i = get_cmd_index(q, q->read_ptr);

                iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i],
                                 DMA_BIDIRECTIONAL);
                txq->meta[i].flags = 0;

                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
}

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct device *dev = priv->bus.dev;
        int i;

        iwl_cmd_queue_unmap(priv);

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < TFD_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
                                  txq->tfds, txq->q.dma_addr);

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space becomes less than the low mark, the Tx queue
 * is stopped. When reclaiming packets (on the 'tx done' IRQ), if the free
 * space becomes greater than the high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
        int s = q->read_ptr - q->write_ptr;

        if (q->read_ptr > q->write_ptr)
                s -= q->n_bd;

        if (s <= 0)
                s += q->n_window;
        /* keep some reserve to not confuse empty and full situations */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}

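/*
 * Worked example (illustrative) for iwl_queue_space() above, taking
 * n_bd = 256 and n_window = 64: with read_ptr = 5 and write_ptr = 10,
 * s = 5 - 10 = -5, which becomes -5 + 64 = 59 slots in the window,
 * minus the 2-entry reserve = 57 usable slots.  The reserve keeps a
 * completely full queue from looking identical to an empty one.
 */
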
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
                          int count, int slots_num, u32 id)
{
        q->n_bd = count;
        q->n_window = slots_num;
        q->id = id;

        /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
         * and iwl_queue_dec_wrap are broken. */
        if (WARN_ON(!is_power_of_2(count)))
                return -EINVAL;

        /* slots_num must be power-of-two size, otherwise
         * get_cmd_index is broken. */
        if (WARN_ON(!is_power_of_2(slots_num)))
                return -EINVAL;

        q->low_mark = q->n_window / 4;
        if (q->low_mark < 4)
                q->low_mark = 4;

        q->high_mark = q->n_window / 8;
        if (q->high_mark < 2)
                q->high_mark = 2;

        q->write_ptr = q->read_ptr = 0;

        return 0;
}

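/*
 * Illustrative note: with the defaults above, a 64-slot window gets
 * low_mark = 64 / 4 = 16 and high_mark = 64 / 8 = 8; the clamps only
 * matter for small windows, where the computed marks would otherwise
 * drop below 4 and 2 respectively.
 */
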
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in
 * the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        dma_addr_t phys_addr;
        unsigned long flags;
        u32 idx;
        u16 copy_size, cmd_size;
        bool is_ct_kill = false;
        bool had_nocopy = false;
        int i;
        u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
        const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
        int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
        int trace_idx;
#endif

        if (test_bit(STATUS_FW_ERROR, &priv->status)) {
                IWL_WARN(priv, "fw recovery, no hcmd send\n");
                return -EIO;
        }

        copy_size = sizeof(out_cmd->hdr);
        cmd_size = sizeof(out_cmd->hdr);

        /* need one for the header if the first is NOCOPY */
        BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
                        had_nocopy = true;
                } else {
                        /* NOCOPY must not be followed by normal! */
                        if (WARN_ON(had_nocopy))
                                return -EINVAL;
                        copy_size += cmd->len[i];
                }
                cmd_size += cmd->len[i];
        }

        /*
         * If any of the command structures end up being larger than
         * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
         * allocated into separate TFDs, then we will need to
         * increase the size of the buffers.
         */
        if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
                return -EINVAL;

        if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
                IWL_WARN(priv, "Not sending command - %s KILL\n",
                         iwl_is_rfkill(priv) ? "RF" : "CT");
                return -EIO;
        }

        spin_lock_irqsave(&priv->hcmd_lock, flags);

        if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
                spin_unlock_irqrestore(&priv->hcmd_lock, flags);

                IWL_ERR(priv, "No space in command queue\n");
                is_ct_kill = iwl_check_for_ct_kill(priv);
                if (!is_ct_kill) {
                        IWL_ERR(priv, "Restarting adapter due to queue full\n");
                        iwlagn_fw_error(priv, false);
                }
                return -ENOSPC;
        }

        idx = get_cmd_index(q, q->write_ptr);
        out_cmd = txq->cmd[idx];
        out_meta = &txq->meta[idx];

        memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
        if (cmd->flags & CMD_WANT_SKB)
                out_meta->source = cmd;
        if (cmd->flags & CMD_ASYNC)
                out_meta->callback = cmd->callback;

        /* set up the header */

        out_cmd->hdr.cmd = cmd->id;
        out_cmd->hdr.flags = 0;
        out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
                                            INDEX_TO_SEQ(q->write_ptr));

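        /*
         * Illustrative note: the sequence number built above round-trips
         * through the firmware.  The response carries it back, and
         * iwl_tx_cmd_complete() below uses SEQ_TO_QUEUE()/SEQ_TO_INDEX()
         * to recover the queue and index and locate this command's slot.
         */
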
        /* and copy the data that needs to be copied */

        cmd_dest = &out_cmd->cmd.payload[0];
        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
                        break;
                memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
                cmd_dest += cmd->len[i];
        }

        IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
                        "%d bytes at %d[%d]:%d\n",
                        get_cmd_string(out_cmd->hdr.cmd),
                        out_cmd->hdr.cmd,
                        le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
                        q->write_ptr, idx, priv->cmd_queue);

        phys_addr = dma_map_single(priv->bus.dev, &out_cmd->hdr, copy_size,
                                DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(priv->bus.dev, phys_addr))) {
                idx = -ENOMEM;
                goto out;
        }

        dma_unmap_addr_set(out_meta, mapping, phys_addr);
        dma_unmap_len_set(out_meta, len, copy_size);

        iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
        trace_bufs[0] = &out_cmd->hdr;
        trace_lens[0] = copy_size;
        trace_idx = 1;
#endif

        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
                        continue;
                phys_addr = dma_map_single(priv->bus.dev, (void *)cmd->data[i],
                                           cmd->len[i], DMA_BIDIRECTIONAL);
                if (dma_mapping_error(priv->bus.dev, phys_addr)) {
                        iwlagn_unmap_tfd(priv, out_meta,
                                         &txq->tfds[q->write_ptr],
                                         DMA_BIDIRECTIONAL);
                        idx = -ENOMEM;
                        goto out;
                }

                iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
                                             cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
                trace_bufs[trace_idx] = cmd->data[i];
                trace_lens[trace_idx] = cmd->len[i];
                trace_idx++;
#endif
        }

        out_meta->flags = cmd->flags;

        txq->need_update = 1;

        /* check that tracing gets all possible blocks */
        BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
        trace_iwlwifi_dev_hcmd(priv, cmd->flags,
                               trace_bufs[0], trace_lens[0],
                               trace_bufs[1], trace_lens[1],
                               trace_bufs[2], trace_lens[2]);
#endif

        /* Increment and update queue's write index */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        iwl_txq_update_write_ptr(priv, txq);

 out:
        spin_unlock_irqrestore(&priv->hcmd_lock, flags);
        return idx;
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        int nfreed = 0;

        if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
                IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
                        "index %d is out of range [0-%d] %d %d.\n", __func__,
                        txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
                return;
        }

        for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                if (nfreed++ > 0) {
                        IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
                                q->write_ptr, q->read_ptr);
                        iwlagn_fw_error(priv, false);
                }
        }
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        int txq_id = SEQ_TO_QUEUE(sequence);
        int index = SEQ_TO_INDEX(sequence);
        int cmd_index;
        struct iwl_device_cmd *cmd;
        struct iwl_cmd_meta *meta;
        struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
        unsigned long flags;

        /* If a Tx command is being handled and it isn't in the actual
         * command queue then a command routing bug has been introduced
         * in the queue management code. */
        if (WARN(txq_id != priv->cmd_queue,
                 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
                 txq_id, priv->cmd_queue, sequence,
                 priv->txq[priv->cmd_queue].q.read_ptr,
                 priv->txq[priv->cmd_queue].q.write_ptr)) {
                iwl_print_hex_error(priv, pkt, 32);
                return;
        }

        cmd_index = get_cmd_index(&txq->q, index);
        cmd = txq->cmd[cmd_index];
        meta = &txq->meta[cmd_index];

        iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);

        /* Input error checking is done when commands are added to queue. */
        if (meta->flags & CMD_WANT_SKB) {
                meta->source->reply_page = (unsigned long)rxb_addr(rxb);
                rxb->page = NULL;
        } else if (meta->callback)
                meta->callback(priv, cmd, pkt);

        spin_lock_irqsave(&priv->hcmd_lock, flags);

        iwl_hcmd_queue_reclaim(priv, txq_id, index);

        if (!(meta->flags & CMD_ASYNC)) {
                clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
                IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
                               get_cmd_string(cmd->hdr.cmd));
                wake_up_interruptible(&priv->wait_command_queue);
        }

        meta->flags = 0;

        spin_unlock_irqrestore(&priv->hcmd_lock, flags);
}