/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-trans.h"
#include "iwl-trans-int-pcie.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-shared.h"
#include "iwl-eeprom.h"

/* TODO: the transport layer should not include this */
#include "iwl-core.h"

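/*
 * iwl_trans_rx_alloc - allocate the RX queue DMA resources
 *
 * Allocates the circular buffer of Read Buffer Descriptors (RBDs) and the
 * receive buffer status area in DMA-coherent memory. Both are shared with
 * the device; the RX buffers themselves are allocated later by the
 * replenish code.
 */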
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = bus(trans)->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;
	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

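/*
 * iwl_trans_rxq_free_rx_bufs - return all RX buffers to the rx_used list
 *
 * Any buffer still mapped for DMA is unmapped and its page freed; every
 * pool entry is then queued on rx_used so it can be replenished fresh.
 */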
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     hw_params(trans).rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

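/*
 * iwl_trans_rx_hw_init - program the RX DMA channel
 *
 * Points the flow handler (FH) at the RBD circular buffer and the status
 * area, selects the 4k/8k RX buffer size, and (re)enables RX DMA with the
 * RB timeout and RBD-count settings spelled out in the comment below.
 */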
static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	rb_timeout = RX_RB_TIMEOUT;

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

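/*
 * iwl_rx_init - (re)initialize the RX queue
 *
 * Allocates the queue on first use, recycles all buffers, resets the
 * read/write indexes, replenishes the queue with fresh buffers and then
 * programs the hardware. Safe to call on a queue that already exists:
 * it resets in place rather than reallocating.
 */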
static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	return 0;
}

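/*
 * iwl_trans_pcie_rx_free - undo iwl_trans_rx_alloc
 *
 * Frees the RX buffers, the RBD circular buffer and the status area.
 * A NULL rxq->bd means nothing was ever allocated, so there is nothing
 * to free.
 */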
static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(bus(trans)->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
	/* stop Rx DMA */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

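/*
 * Helpers for the keep-warm buffer and the scheduler byte-count tables:
 * a struct iwl_dma_ptr bundles the CPU address, DMA address and size of
 * one DMA-coherent allocation.
 */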
static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

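/*
 * iwl_trans_txq_alloc - allocate one TX queue
 *
 * Allocates the per-slot meta/cmd arrays, the per-TFD skb pointer array
 * (data queues only) and the TFD circular buffer shared with the device.
 * For the command queue, the iwl_device_cmd buffers themselves are also
 * allocated here, one per slot.
 */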
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
			       struct iwl_tx_queue *txq, int slots_num,
			       u32 txq_id)
{
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
					      GFP_KERNEL);
			if (!txq->cmd[i])
				goto error;
		}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != trans->shrd->cmd_queue) {
		txq->skbs = kzalloc(sizeof(txq->skbs[0]) *
				    TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->skbs) {
			IWL_ERR(trans, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->skbs = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->skbs);
	txq->skbs = NULL;
	/* since txq->cmd has been zeroed,
	 * all non allocated cmd[i] will be NULL */
	if (txq->cmd && txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}

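/*
 * iwl_trans_txq_init - initialize an already-allocated TX queue
 *
 * Resets the software state and head/tail indexes, then tells the device
 * where the queue's TFD circular buffer lives in DRAM.
 */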
static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bound by q->n_window */
		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr));
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = bus(trans)->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < hw_params(trans).max_txq_num; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 *
 * Allocate all Tx DMA structures and initialize them.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kzalloc(sizeof(struct iwl_tx_queue) *
			hw_params(trans).max_txq_num, GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}
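
/*
 * iwl_tx_init - (re)initialize all TX queues
 *
 * Allocates the TX context on first use, then turns the scheduler off,
 * points the device at the keep-warm buffer and initializes every queue,
 * including the command queue. On re-init, queues are reset in place.
 */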
static int iwl_tx_init(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(bus(trans), SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}

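/* Route device power to V_MAIN (see the V_AUX note kept inside the body) */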
static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

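/*
 * iwl_nic_init - low-level device bring-up
 *
 * Runs APM init and the per-device NIC configuration, then allocates or
 * resets the RX queue and all TX queues. Sets STATUS_INIT on success.
 */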
static int iwl_nic_init(struct iwl_trans *trans)
{
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_apm_init(priv(trans));

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(bus(trans), CSR_INT_COALESCING,
		   IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_set_pwr_vmain(trans);

	priv(trans)->cfg->lib->nic_config(priv(trans));

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (hw_params(trans).shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
			    0x800FFFFF);
	}

	set_bit(STATUS_INIT, &trans->shrd->status);

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}

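/*
 * Static TX-queue layout: each entry maps a HW queue number to the FIFO
 * it drains into and the mac80211 access category it serves, or
 * IWL_AC_UNSET for queues (such as the command queue) that carry no AC
 * traffic. The second table is used when a context beside the BSS one
 * is valid (PAN).
 */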
#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};

static const u8 iwlagn_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};
static const u8 iwlagn_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
static const u8 iwlagn_pan_ac_to_fifo[] = {
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_BK_IPAN,
};
static const u8 iwlagn_pan_ac_to_queue[] = {
	7, 6, 5, 4,
};

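/*
 * iwl_trans_pcie_start_device - bring the NIC up to the point where
 * firmware can be loaded
 *
 * Sets up the AC/queue mappings for the BSS and PAN contexts; on SKUs
 * with AMT enabled it first makes sure the hardware is ready. Checks the
 * RF-kill switch, runs the NIC init sequence and enables interrupts.
 * Returns -ERFKILL when the platform kill switch is engaged.
 */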
static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;

	trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
	trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;

	trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
	trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;

	if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_trans_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	if (iwl_is_rfkill(trans->shrd)) {
		iwl_set_hw_rfkill_state(priv(trans), true);
		iwl_enable_interrupts(trans);
		return -ERFKILL;
	}

	iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
 * must be called under trans->shrd->lock and with MAC access held.
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	iwl_write_prph(bus(trans), SCD_TXFACT, mask);
}

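/*
 * iwl_trans_pcie_tx_start - program the TX scheduler after firmware load
 *
 * Clears the scheduler's context and status SRAM, points it at the
 * byte-count tables, enables the FH DMA channels and chicken bits, then
 * activates every queue and binds the first IWLAGN_FIRST_AMPDU_QUEUE
 * queues to their FIFOs according to the tables above.
 */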
static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_rxon_context *ctx;
	struct iwl_priv *priv = priv(trans);
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans->shrd->lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
	       a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);

	iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(trans));
	iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < hw_params(trans).max_txq_num; i++) {
		iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
		       IWL_MASK(0, hw_params(trans).max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&trans_pcie->queue_stopped[0], 0,
		sizeof(trans_pcie->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&trans_pcie->queue_stop_count[i], 0);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	/* reset to 0 to enable all the queues first */
	trans_pcie->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(trans_pcie, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
					      fifo, 0);
	}

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	int ch, txq_id;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans->shrd->lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(bus(trans),
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(trans, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(bus(trans),
					      FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}

static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* wait to make sure we flush pending tasklet */
	synchronize_irq(bus(trans)->irq);
	tasklet_kill(&trans_pcie->irq_tasklet);
}

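/*
 * iwl_trans_pcie_stop_device - stop the NIC and put it in low power state
 *
 * Resets the on-board processor, masks interrupts and stops ICT, stops
 * the TX and RX DMA engines (unless the device is already dead) and
 * finally stops APM.
 */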
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	/* stop and reset the on-board processor */
	iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	iwl_trans_pcie_disable_sync_irq(trans);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
		iwl_trans_tx_stop(trans);
		iwl_trans_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(bus(trans), APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(priv(trans));
}

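/*
 * iwl_trans_pcie_tx - enqueue one frame for transmission
 *
 * Picks the HW queue (multicast-after-DTIM, off-channel, AC-mapped or
 * aggregation queue), assigns the QoS sequence number, maps the TX
 * command + MAC header and the payload for DMA, attaches both to a TFD
 * and finally bumps the queue's write pointer so the device sees it.
 */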
Emmanuel Grumbache13c0c52011-08-25 23:11:24 -07001039static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1040 struct iwl_device_cmd *dev_cmd, u8 ctx, u8 sta_id)
Emmanuel Grumbach47c1b492011-07-03 11:22:15 +03001041{
Emmanuel Grumbache13c0c52011-08-25 23:11:24 -07001042 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1043 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1044 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Emmanuel Grumbachdfa2bdb2011-08-25 23:11:23 -07001045 struct iwl_tx_cmd *tx_cmd = &dev_cmd->cmd.tx;
Emmanuel Grumbach47c1b492011-07-03 11:22:15 +03001046 struct iwl_cmd_meta *out_meta;
Emmanuel Grumbache13c0c52011-08-25 23:11:24 -07001047 struct iwl_tx_queue *txq;
1048 struct iwl_queue *q;
Emmanuel Grumbach47c1b492011-07-03 11:22:15 +03001049
1050 dma_addr_t phys_addr = 0;
1051 dma_addr_t txcmd_phys;
1052 dma_addr_t scratch_phys;
1053 u16 len, firstlen, secondlen;
Emmanuel Grumbache13c0c52011-08-25 23:11:24 -07001054 u16 seq_number = 0;
Emmanuel Grumbach47c1b492011-07-03 11:22:15 +03001055 u8 wait_write_ptr = 0;
Emmanuel Grumbache13c0c52011-08-25 23:11:24 -07001056 u8 txq_id;
1057 u8 tid = 0;
1058 bool is_agg = false;
1059 __le16 fc = hdr->frame_control;
Emmanuel Grumbach47c1b492011-07-03 11:22:15 +03001060 u8 hdr_len = ieee80211_hdrlen(fc);
1061
Emmanuel Grumbache13c0c52011-08-25 23:11:24 -07001062 /*
1063 * Send this frame after DTIM -- there's a special queue
1064 * reserved for this for contexts that support AP mode.
1065 */
1066 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1067 txq_id = trans_pcie->mcast_queue[ctx];
1068
1069 /*
1070 * The microcode will clear the more data
1071 * bit in the last frame it transmits.
1072 */
1073 hdr->frame_control |=
1074 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1075 } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
1076 txq_id = IWL_AUX_QUEUE;
1077 else
1078 txq_id =
1079 trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
1080
1081 if (ieee80211_is_data_qos(fc)) {
1082 u8 *qc = NULL;
1083 struct iwl_tid_data *tid_data;
1084 qc = ieee80211_get_qos_ctl(hdr);
1085 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1086 tid_data = &trans->shrd->tid_data[sta_id][tid];
1087
1088 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
1089 return -1;
1090
1091 seq_number = tid_data->seq_number;
1092 seq_number &= IEEE80211_SCTL_SEQ;
1093 hdr->seq_ctrl = hdr->seq_ctrl &
1094 cpu_to_le16(IEEE80211_SCTL_FRAG);
1095 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1096 seq_number += 0x10;
1097 /* aggregation is on for this <sta,tid> */
1098 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1099 tid_data->agg.state == IWL_AGG_ON) {
1100 txq_id = tid_data->agg.txq_id;
1101 is_agg = true;
1102 }
1103 }
1104
Emmanuel Grumbach8ad71be2011-08-25 23:11:32 -07001105 txq = &trans_pcie->txq[txq_id];
Emmanuel Grumbache13c0c52011-08-25 23:11:24 -07001106 q = &txq->q;
1107
Emmanuel Grumbach47c1b492011-07-03 11:22:15 +03001108 /* Set up driver data for this TFD */
Emmanuel Grumbach2c452292011-08-25 23:11:21 -07001109 txq->skbs[q->write_ptr] = skb;
Emmanuel Grumbachdfa2bdb2011-08-25 23:11:23 -07001110 txq->cmd[q->write_ptr] = dev_cmd;
1111
1112 dev_cmd->hdr.cmd = REPLY_TX;
1113 dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1114 INDEX_TO_SEQ(q->write_ptr)));
Emmanuel Grumbach47c1b492011-07-03 11:22:15 +03001115
1116 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1117 out_meta = &txq->meta[q->write_ptr];
1118
1119 /*
1120 * Use the first empty entry in this queue's command buffer array
1121 * to contain the Tx command and MAC header concatenated together
1122 * (payload data will be in another buffer).
1123 * Size of this varies, due to varying MAC header length.
1124 * If end is not dword aligned, we'll have 2 extra bytes at the end
1125 * of the MAC header (device reads on dword boundaries).
1126 * We'll tell device about this padding later.
1127 */
1128 len = sizeof(struct iwl_tx_cmd) +
1129 sizeof(struct iwl_cmd_header) + hdr_len;
1130 firstlen = (len + 3) & ~3;
1131
1132 /* Tell NIC about any 2-byte padding after MAC header */
1133 if (firstlen != len)
1134 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1135
	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(bus(trans)->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

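	/*
	 * For a fragment burst, defer telling the device about the new
	 * write pointer: kicking the DMA engine once per fragment would
	 * waste bus cycles, so the update is batched until the queue
	 * fills up (see the high-mark check at the end).
	 */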
	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(bus(trans)->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
			dma_unmap_single(bus(trans)->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(bus(trans)->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);
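	/*
	 * Hand the device the bus address of the scratch area that lives
	 * inside the Tx command itself, split into the low 32 bits and
	 * the remaining high bits for DMA addresses above 4GB.
	 */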
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (is_agg)
		iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
						  le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
			DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv(trans),
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

	if (ieee80211_is_data_qos(fc)) {
		trans->shrd->tid_data[sta_id][tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			trans->shrd->tid_data[sta_id][tid].seq_number =
				seq_number;
	}

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 * The only remaining question is when to update the write
	 * pointer: immediately, or (for deferred fragment bursts)
	 * only once the queue runs close to full.
	 */
	if (iwl_queue_space(q) < q->high_mark) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(trans, txq);
		} else {
			iwl_stop_queue(trans, txq);
		}
	}
	return 0;
}

static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(bus(trans), CSR_RESET, 0);
}

static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
		iwl_irq_tasklet, (unsigned long)trans);

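	/*
	 * Set up the ICT (interrupt cause table): a DMA-visible table
	 * that the NIC fills with interrupt causes, sparing the ISR a
	 * slow CSR read on every interrupt.
	 */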
	iwl_alloc_isr_ict(trans);

	err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
		DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
		iwl_free_isr_ict(trans);
		return err;
	}

	INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
	return 0;
}

static int iwlagn_txq_check_empty(struct iwl_trans *trans,
				  int sta_id, u8 tid, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_queue *q = &trans_pcie->txq[txq_id].q;
	struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];

	lockdep_assert_held(&trans->shrd->sta_lock);

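	/*
	 * A block-ack session can only move forward once its HW queue
	 * has drained: DELBA must wait for the last in-flight frame
	 * before the aggregation queue is torn down, and ADDBA before
	 * aggregation is switched fully on. This is called on every
	 * reclaim to catch that moment.
	 */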
	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/*
		 * We are reclaiming the last packet of the
		 * aggregated HW queue.
		 */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			IWL_DEBUG_HT(trans,
				"HW queue empty: continue DELBA flow\n");
			iwl_trans_pcie_txq_agg_disable(trans, txq_id);
			tid_data->agg.state = IWL_AGG_OFF;
			iwl_stop_tx_ba_trans_ready(priv(trans),
						   NUM_IWL_RXON_CTX,
						   sta_id, tid);
			iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(trans,
				"HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			iwl_start_tx_ba_trans_ready(priv(trans),
						    NUM_IWL_RXON_CTX,
						    sta_id, tid);
		}
		break;
	}

	return 0;
}

static void iwl_free_tfds_in_queue(struct iwl_trans *trans,
			    int sta_id, int tid, int freed)
{
	lockdep_assert_held(&trans->shrd->sta_lock);

	if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed) {
		trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed;
	} else {
		IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n",
			trans->shrd->tid_data[sta_id][tid].tfds_in_queue,
			freed);
		trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0;
	}
}

static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
				   int txq_id, int ssn, u32 status,
				   struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	/* n_bd is usually 256 => n_bd - 1 = 0xff */
	int tfd_num = ssn & (txq->q.n_bd - 1);
	int freed = 0;
	u8 agg_state;
	bool cond;

	txq->time_stamp = jiffies;

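	/*
	 * "cond" decides whether the queue may be woken after the
	 * reclaim: an aggregation queue stays stopped while a DELBA
	 * flush is draining it, a normal queue stays stopped after a
	 * failure on a passive channel that has seen no Rx yet.
	 */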
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07001332 if (txq->sched_retry) {
1333 agg_state =
Emmanuel Grumbach464021f2011-08-25 23:11:26 -07001334 trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07001335 cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
1336 } else {
1337 cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
1338 }
1339
1340 if (txq->q.read_ptr != tfd_num) {
1341 IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
1342 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
1343 ssn , tfd_num, txq_id, txq->swq_id);
Emmanuel Grumbach464021f2011-08-25 23:11:26 -07001344 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07001345 if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
Emmanuel Grumbache20d43412011-08-25 23:11:31 -07001346 iwl_wake_queue(trans, txq);
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07001347 }
Emmanuel Grumbach464021f2011-08-25 23:11:26 -07001348
1349 iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
1350 iwlagn_txq_check_empty(trans, sta_id, tid, txq_id);
Emmanuel Grumbacha0eaad72011-08-25 23:11:00 -07001351}
1352
static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	iwl_trans_pcie_tx_free(trans);
	iwl_trans_pcie_rx_free(trans);
	free_irq(bus(trans)->irq, trans);
	iwl_free_isr_ict(trans);
	trans->shrd->trans = NULL;
	kfree(trans);
}

#ifdef CONFIG_PM

static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	/*
	 * This function is called when the system goes into suspend state.
	 * mac80211 calls iwl_mac_stop() from its own suspend handler first,
	 * but since iwl_mac_stop() has no knowledge of who the caller is,
	 * it will not call apm_ops.stop() to stop the DMA operation.
	 * Call apm_ops.stop() here to make sure the DMA is stopped.
	 *
	 * But of course ... if we have configured WoWLAN then we did other
	 * things already :-)
	 */
	if (!trans->shrd->wowlan)
		iwl_apm_stop(priv(trans));

	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill = false;

	iwl_enable_interrupts(trans);

	if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	iwl_set_hw_rfkill_state(priv(trans), hw_rfkill);

	return 0;
}
#else /* CONFIG_PM */
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{ return 0; }

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{ return 0; }

#endif /* CONFIG_PM */

static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
					  u8 ctx)
{
	u8 ac, txq_id;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	for (ac = 0; ac < AC_NUM; ac++) {
		txq_id = trans_pcie->ac_to_queue[ctx][ac];
		IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
			ac,
			(atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
			? "stopped" : "awake");
		iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
	}
}

const struct iwl_trans_ops trans_ops_pcie;

static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
{
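	/*
	 * A single allocation carries both the generic transport struct
	 * and the PCIe-private state placed immediately behind it;
	 * IWL_TRANS_GET_PCIE_TRANS() simply points past the generic part.
	 */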
	struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
					      sizeof(struct iwl_trans_pcie),
					      GFP_KERNEL);
	if (iwl_trans) {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
		iwl_trans->ops = &trans_ops_pcie;
		iwl_trans->shrd = shrd;
		trans_pcie->trans = iwl_trans;
		spin_lock_init(&iwl_trans->hcmd_lock);
	}

	return iwl_trans;
}

static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
}

#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		if (cnt == trans->shrd->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}

/*
 * On every watchdog tick we check the (latest) time stamp. If it has not
 * changed during the timeout period and the queue is not empty, we report
 * the queue as stuck so the caller can reset the firmware.
 */
static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[cnt];
	struct iwl_queue *q = &txq->q;
	unsigned long timeout;

	if (q->read_ptr == q->write_ptr) {
		txq->time_stamp = jiffies;
		return 0;
	}

	timeout = txq->time_stamp +
		  msecs_to_jiffies(hw_params(trans).wd_timeout);

	if (time_after(jiffies, timeout)) {
		IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
			hw_params(trans).wd_timeout);
		return 1;
	}

	return 0;
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		return -ENOMEM;						\
} while (0)
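
/*
 * Each DEBUGFS_*_FILE_OPS(name) below expands to the read and/or write
 * prototypes plus a struct file_operations named iwl_dbgfs_<name>_ops,
 * so every debugfs file costs one macro invocation here and one
 * DEBUGFS_ADD_FILE() at registration time.
 */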

/* file operation */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
					char __user *user_buf,		\
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);


static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = iwl_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
					  char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_priv *priv = priv(trans);
	int pos = 0, ofs = 0;
	int cnt = 0, entry;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char *buf;
	int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
		(hw_params(trans).max_txq_num * 32 * 8) + 400;
	const u8 *ptr;
	ssize_t ret;

	if (!trans_pcie->txq) {
		IWL_ERR(trans, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(trans, "Can not allocate buffer\n");
		return -ENOMEM;
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"q[%d]: read_ptr: %u, write_ptr: %u\n",
				cnt, q->read_ptr, q->write_ptr);
	}
	if (priv->tx_traffic &&
	    (iwl_get_debug_level(trans->shrd) & IWL_DL_TX)) {
		ptr = priv->tx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Tx Traffic idx: %u\n", priv->tx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++, ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						"0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
	pos += scnprintf(buf + pos, bufsz - pos,
			"read: %u, write: %u\n",
			 rxq->read, rxq->write);

	if (priv->rx_traffic &&
	    (iwl_get_debug_level(trans->shrd) & IWL_DL_RX)) {
		ptr = priv->rx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Rx Traffic idx: %u\n", priv->rx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++, ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						"0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
					   const char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int traffic_log;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &traffic_log) != 1)
		return -EFAULT;
	if (traffic_log == 0)
		iwl_reset_traffic_log(priv(trans));

	return count;
}

static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;

	if (!trans_pcie->txq) {
		IWL_ERR(trans, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"hwq %.2d: read=%u write=%u stop=%d"
				" swq_id=%#.2x (ac %d/hwq %d)\n",
				cnt, q->read_ptr, q->write_ptr,
				!!test_bit(cnt, trans_pcie->queue_stopped),
				txq->swq_id, txq->swq_id & 3,
				(txq->swq_id >> 2) & 0x1f);
		if (cnt >= 4)
			continue;
		/* for the ACs, display the stop count too */
		pos += scnprintf(buf + pos, bufsz - pos,
				"\tstop-count: %d\n",
				atomic_read(&trans_pcie->queue_stop_count[cnt]));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
						rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
						rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
						rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
			 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
					"closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_log_event_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	int pos = 0;
	ssize_t ret = -ENOMEM;

	ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
		kfree(buf);
	}
	return ret;
}

static ssize_t iwl_dbgfs_log_event_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	u32 event_log_flag;
	char buf[8];
	int buf_size;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%u", &event_log_flag) != 1)
		return -EFAULT;
	if (event_log_flag == 1)
		iwl_dump_nic_event_log(trans, true, NULL, false);

	return count;
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(trans, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code: 0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static const char *get_csr_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
}

void iwl_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(bus(trans), csr_tbl[i]));
	}
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_dump_csr(trans);

	return count;
}

static const char *get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}

int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
				"  %34s: 0X%08x\n",
				get_fh_string(fh_tbl[i]),
				iwl_read_direct32(bus(trans), fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(trans, "  %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(bus(trans), fh_tbl[i]));
	}
	return 0;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_dump_fh(trans, &buf, true);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}

DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/* Create the debugfs files and directories */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{ return 0; }

#endif /* CONFIG_IWLWIFI_DEBUGFS */

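/*
 * PCIe implementation of the transport ops vtable. The upper layers only
 * ever call through struct iwl_trans_ops, which keeps them bus-agnostic;
 * a different bus glue would supply its own ops table here.
 */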
const struct iwl_trans_ops trans_ops_pcie = {
	.alloc = iwl_trans_pcie_alloc,
	.request_irq = iwl_trans_pcie_request_irq,
	.start_device = iwl_trans_pcie_start_device,
	.prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
	.stop_device = iwl_trans_pcie_stop_device,

	.tx_start = iwl_trans_pcie_tx_start,
	.wake_any_queue = iwl_trans_pcie_wake_any_queue,

	.send_cmd = iwl_trans_pcie_send_cmd,
	.send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
	.tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,

	.kick_nic = iwl_trans_pcie_kick_nic,

	.free = iwl_trans_pcie_free,
	.stop_queue = iwl_trans_pcie_stop_queue,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
	.check_stuck_queue = iwl_trans_pcie_check_stuck_queue,

	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
};