/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>

#include <net/ieee80211_radiotap.h>
#include <net/lib80211.h>
#include <net/mac80211.h>

#include <asm/div64.h>

#include "iwl-3945-core.h"
#include "iwl-commands.h"
#include "iwl-3945.h"
#include "iwl-3945-fh.h"
#include "iwl-helpers.h"

static int iwl3945_tx_queue_update_write_ptr(struct iwl3945_priv *priv,
				  struct iwl3945_tx_queue *txq);

/******************************************************************************
 *
 * module boiler plate
 *
 ******************************************************************************/

/* module parameters */
static int iwl3945_param_disable_hw_scan; /* def: 0 = use 3945's h/w scan */
static u32 iwl3945_param_debug;    /* def: 0 = minimal debug log messages */
static int iwl3945_param_disable;  /* def: 0 = enable radio */
static int iwl3945_param_antenna;  /* def: 0 = both antennas (use diversity) */
int iwl3945_param_hwcrypto;        /* def: 0 = use software encryption */
int iwl3945_param_queues_num = IWL39_MAX_NUM_QUEUES; /* def: 8 Tx queues */

/*
 * module name, copyright, version, etc.
 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
 */

#define DRV_DESCRIPTION	\
"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"

#ifdef CONFIG_IWL3945_DEBUG
#define VD "d"
#else
#define VD
#endif

#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
#define VS "s"
#else
#define VS
#endif

#define IWLWIFI_VERSION "1.2.26k" VD VS
#define DRV_COPYRIGHT	"Copyright(c) 2003-2008 Intel Corporation"
#define DRV_AUTHOR	"<ilw@linux.intel.com>"
#define DRV_VERSION	IWLWIFI_VERSION


MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

static const struct ieee80211_supported_band *iwl3945_get_band(
		struct iwl3945_priv *priv, enum ieee80211_band band)
{
	return priv->hw->wiphy->bands[band];
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on 'tx done' IRQ), if free space becomes > high
 * mark, the Tx queue is resumed.
 *
 * The 3945 operates with six queues:  One receive queue, one transmit queue
 * (#4) for sending commands to the device firmware, and four transmit queues
 * (#0-3) for data tx via EDCA.  An additional 2 HCCA queues are unused.
 ***************************************************/

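/**
 * iwl3945_queue_space - return number of slots available for new entries
 *
 * Keeps 2 slots in reserve so that a completely full queue is never
 * mistaken for an empty one (read and write pointers would be equal
 * in both cases).
 */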
int iwl3945_queue_space(const struct iwl3945_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

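/**
 * iwl3945_x2_queue_used - check whether index i lies in the in-use window
 *
 * Handles both the normal case (write pointer ahead of read pointer) and
 * the case where the write pointer has wrapped around the circular buffer.
 */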
int iwl3945_x2_queue_used(const struct iwl3945_queue *q, int i)
{
	return q->write_ptr > q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}


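/**
 * get_cmd_index - map a queue index to a slot in the command buffer array
 * @is_huge: non-zero selects the oversized (scan) command slot appended
 *           after the q->n_window regular slots
 */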
static inline u8 get_cmd_index(struct iwl3945_queue *q, u32 index, int is_huge)
{
	/* This is for scan command, the big buffer at end of command array */
	if (is_huge)
		return q->n_window;	/* must be power of 2 */

	/* Otherwise, use normal size buffers */
	return index & (q->n_window - 1);
}

/**
 * iwl3945_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl3945_queue_init(struct iwl3945_priv *priv, struct iwl3945_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/**
 * iwl3945_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl3945_tx_queue_alloc(struct iwl3945_priv *priv,
			      struct iwl3945_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERROR("kmalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else
		txq->txb = NULL;

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->bd = pci_alloc_consistent(dev,
			sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
			&txq->q.dma_addr);

	if (!txq->bd) {
		IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
			  sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl3945_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl3945_tx_queue_init(struct iwl3945_priv *priv,
		      struct iwl3945_tx_queue *txq, int slots_num, u32 txq_id)
{
	struct pci_dev *dev = priv->pci_dev;
	int len;
	int rc = 0;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since scan command is very huge; the system will
	 * not have two scans at the same time, so only one is needed.
	 * For data Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	len = sizeof(struct iwl3945_cmd) * slots_num;
	if (txq_id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;
	txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
	if (!txq->cmd)
		return -ENOMEM;

	/* Alloc driver data array and TFD circular buffer */
	rc = iwl3945_tx_queue_alloc(priv, txq, txq_id);
	if (rc) {
		pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

		return -ENOMEM;
	}
	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue high/low-water, head/tail indexes */
	iwl3945_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue, enable DMA channel. */
	iwl3945_hw_tx_queue_init(priv, txq);

	return 0;
}

/**
 * iwl3945_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl3945_tx_queue_free(struct iwl3945_priv *priv, struct iwl3945_tx_queue *txq)
{
	struct iwl3945_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int len;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		iwl3945_hw_txq_free_tfd(priv, txq);

	len = sizeof(struct iwl3945_cmd) * q->n_window;
	if (q->id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl3945_tfd_frame) *
				    txq->q.n_bd, txq->bd, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

const u8 iwl3945_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

/*************** STATION TABLE MANAGEMENT ****
 * mac80211 should be examined to determine if sta_info is duplicating
 * the functionality provided here
 */

/**************************************************************/
#if 0 /* temporarily disabled till we add real remove station */
/**
 * iwl3945_remove_station - Remove driver's knowledge of station.
 *
 * NOTE:  This does not remove station from device's station table.
 */
static u8 iwl3945_remove_station(struct iwl3945_priv *priv, const u8 *addr, int is_ap)
{
	int index = IWL_INVALID_STATION;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);

	if (is_ap)
		index = IWL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		index = priv->hw_setting.bcast_sta_id;
	else
		for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
			if (priv->stations[i].used &&
			    !compare_ether_addr(priv->stations[i].sta.sta.addr,
						addr)) {
				index = i;
				break;
			}

	if (unlikely(index == IWL_INVALID_STATION))
		goto out;

	if (priv->stations[index].used) {
		priv->stations[index].used = 0;
		priv->num_stations--;
	}

	BUG_ON(priv->num_stations < 0);

out:
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return 0;
}
#endif

/**
 * iwl3945_clear_stations_table - Clear the driver's station table
 *
 * NOTE:  This does not clear or otherwise alter the device's station table.
 */
static void iwl3945_clear_stations_table(struct iwl3945_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->num_stations = 0;
	memset(priv->stations, 0, sizeof(priv->stations));

	spin_unlock_irqrestore(&priv->sta_lock, flags);
}

/**
 * iwl3945_add_station - Add station to station tables in driver and device
 */
u8 iwl3945_add_station(struct iwl3945_priv *priv, const u8 *addr, int is_ap, u8 flags)
{
	int i;
	int index = IWL_INVALID_STATION;
	struct iwl3945_station_entry *station;
	unsigned long flags_spin;
	u8 rate;

	spin_lock_irqsave(&priv->sta_lock, flags_spin);
	if (is_ap)
		index = IWL_AP_ID;
	else if (is_broadcast_ether_addr(addr))
		index = priv->hw_setting.bcast_sta_id;
	else
		for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
			if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
						addr)) {
				index = i;
				break;
			}

			if (!priv->stations[i].used &&
			    index == IWL_INVALID_STATION)
				index = i;
		}

	/* These two conditions have the same outcome, but keep them separate
	   since they have different meanings */
	if (unlikely(index == IWL_INVALID_STATION)) {
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return index;
	}

	if (priv->stations[index].used &&
	    !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
		spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
		return index;
	}

	IWL_DEBUG_ASSOC("Add STA ID %d: %pM\n", index, addr);
	station = &priv->stations[index];
	station->used = 1;
	priv->num_stations++;

	/* Set up the REPLY_ADD_STA command to send to device */
	memset(&station->sta, 0, sizeof(struct iwl3945_addsta_cmd));
	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
	station->sta.mode = 0;
	station->sta.sta.sta_id = index;
	station->sta.station_flags = 0;

	if (priv->band == IEEE80211_BAND_5GHZ)
		rate = IWL_RATE_6M_PLCP;
	else
		rate = IWL_RATE_1M_PLCP;

	/* Turn on both antennas for the station... */
	station->sta.rate_n_flags =
			iwl3945_hw_set_rate_n_flags(rate, RATE_MCS_ANT_AB_MSK);

	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);

	/* Add station to device's station table */
	iwl3945_send_add_station(priv, &station->sta, flags);
	return index;

}

/*************** DRIVER STATUS FUNCTIONS   *****/

static inline int iwl3945_is_ready(struct iwl3945_priv *priv)
{
	/* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
	 * set but EXIT_PENDING is not */
	return test_bit(STATUS_READY, &priv->status) &&
	       test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
	       !test_bit(STATUS_EXIT_PENDING, &priv->status);
}

static inline int iwl3945_is_alive(struct iwl3945_priv *priv)
{
	return test_bit(STATUS_ALIVE, &priv->status);
}

static inline int iwl3945_is_init(struct iwl3945_priv *priv)
{
	return test_bit(STATUS_INIT, &priv->status);
}

static inline int iwl3945_is_rfkill_sw(struct iwl3945_priv *priv)
{
	return test_bit(STATUS_RF_KILL_SW, &priv->status);
}

static inline int iwl3945_is_rfkill_hw(struct iwl3945_priv *priv)
{
	return test_bit(STATUS_RF_KILL_HW, &priv->status);
}

static inline int iwl3945_is_rfkill(struct iwl3945_priv *priv)
{
	return iwl3945_is_rfkill_hw(priv) ||
		iwl3945_is_rfkill_sw(priv);
}

static inline int iwl3945_is_ready_rf(struct iwl3945_priv *priv)
{

	if (iwl3945_is_rfkill(priv))
		return 0;

	return iwl3945_is_ready(priv);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

#define IWL_CMD(x) case x: return #x

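/* Map a host command opcode to its name, for debug and error messages */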
static const char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_REMOVE_ALL_STA);
		IWL_CMD(REPLY_3945_RX);
		IWL_CMD(REPLY_TX);
		IWL_CMD(REPLY_RATE_SCALE);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(RADAR_NOTIFICATION);
		IWL_CMD(REPLY_QUIET_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
		IWL_CMD(QUIET_NOTIFICATION);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(MEASURE_ABORT_NOTIFICATION);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(REPLY_CARD_STATE_CMD);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
	default:
		return "UNKNOWN";

	}
}

#define HOST_COMPLETE_TIMEOUT (HZ / 2)

/**
 * iwl3945_enqueue_hcmd - enqueue a uCode command
 * @priv: pointer to device private data
 * @cmd: pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl3945_enqueue_hcmd(struct iwl3945_priv *priv, struct iwl3945_host_cmd *cmd)
{
	struct iwl3945_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl3945_queue *q = &txq->q;
	struct iwl3945_tfd_frame *tfd;
	u32 *control_flags;
	struct iwl3945_cmd *out_cmd;
	u32 idx;
	u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
	dma_addr_t phys_addr;
	int pad;
	u16 count;
	int ret;
	unsigned long flags;

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));


	if (iwl3945_is_rfkill(priv)) {
		IWL_DEBUG_INFO("Not sending command - RF KILL");
		return -EIO;
	}

	if (iwl3945_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERROR("No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	tfd = &txq->bd[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	control_flags = (u32 *) tfd;

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = &txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;

	phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
			offsetof(struct iwl3945_cmd, hdr);
	iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

	pad = U32_PAD(cmd->len);
	count = TFD_CTL_COUNT_GET(*control_flags);
	*control_flags = TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad);

	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     get_cmd_string(out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);

	txq->need_update = 1;

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl3945_tx_queue_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}

static int iwl3945_send_cmd_async(struct iwl3945_priv *priv, struct iwl3945_host_cmd *cmd)
{
	int ret;

	BUG_ON(!(cmd->meta.flags & CMD_ASYNC));

	/* An asynchronous command can not expect an SKB to be set. */
	BUG_ON(cmd->meta.flags & CMD_WANT_SKB);

	/* An asynchronous command MUST have a callback. */
	BUG_ON(!cmd->meta.u.callback);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EBUSY;

	ret = iwl3945_enqueue_hcmd(priv, cmd);
	if (ret < 0) {
		IWL_ERROR("Error sending %s: iwl3945_enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

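/*
 * Send a command and block until the uCode handles it or
 * HOST_COMPLETE_TIMEOUT expires.  Only one synchronous command may be in
 * flight at a time, enforced via the STATUS_HCMD_SYNC_ACTIVE bit.
 */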
static int iwl3945_send_cmd_sync(struct iwl3945_priv *priv, struct iwl3945_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	BUG_ON(cmd->meta.flags & CMD_ASYNC);

	 /* A synchronous command can not have a callback set. */
	BUG_ON(cmd->meta.u.callback != NULL);

	if (test_and_set_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status)) {
		IWL_ERROR("Error sending %s: Already sending a host command\n",
			  get_cmd_string(cmd->id));
		ret = -EBUSY;
		goto out;
	}

	set_bit(STATUS_HCMD_ACTIVE, &priv->status);

	if (cmd->meta.flags & CMD_WANT_SKB)
		cmd->meta.source = &cmd->meta;

	cmd_idx = iwl3945_enqueue_hcmd(priv, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IWL_ERROR("Error sending %s: iwl3945_enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		goto out;
	}

	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
			IWL_ERROR("Error sending %s: time out after %dms.\n",
				  get_cmd_string(cmd->id),
				  jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
		IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_DEBUG_INFO("Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
		IWL_ERROR("Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->meta.flags & CMD_WANT_SKB) {
		struct iwl3945_cmd *qcmd;

		/* Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source). */
		qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
		qcmd->meta.flags &= ~CMD_WANT_SKB;
	}
fail:
	if (cmd->meta.u.skb) {
		dev_kfree_skb_any(cmd->meta.u.skb);
		cmd->meta.u.skb = NULL;
	}
out:
	clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
	return ret;
}

int iwl3945_send_cmd(struct iwl3945_priv *priv, struct iwl3945_host_cmd *cmd)
{
	if (cmd->meta.flags & CMD_ASYNC)
		return iwl3945_send_cmd_async(priv, cmd);

	return iwl3945_send_cmd_sync(priv, cmd);
}

int iwl3945_send_cmd_pdu(struct iwl3945_priv *priv, u8 id, u16 len, const void *data)
{
	struct iwl3945_host_cmd cmd = {
		.id = id,
		.len = len,
		.data = data,
	};

	return iwl3945_send_cmd_sync(priv, &cmd);
}

static int __must_check iwl3945_send_cmd_u32(struct iwl3945_priv *priv, u8 id, u32 val)
{
	struct iwl3945_host_cmd cmd = {
		.id = id,
		.len = sizeof(val),
		.data = &val,
	};

	return iwl3945_send_cmd_sync(priv, &cmd);
}

int iwl3945_send_statistics_request(struct iwl3945_priv *priv)
{
	return iwl3945_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
}

/**
 * iwl3945_set_rxon_channel - Set the phymode and channel values in staging RXON
 * @band: 2.4 or 5 GHz band
 * @channel: Any channel valid for the requested band
 *
 * In addition to setting the staging RXON, priv->band is also set.
 *
 * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the band
 */
static int iwl3945_set_rxon_channel(struct iwl3945_priv *priv,
				    enum ieee80211_band band,
				    u16 channel)
{
	if (!iwl3945_get_channel_info(priv, band, channel)) {
		IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
			       channel, band);
		return -EINVAL;
	}

	if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
	    (priv->band == band))
		return 0;

	priv->staging_rxon.channel = cpu_to_le16(channel);
	if (band == IEEE80211_BAND_5GHZ)
		priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;

	priv->band = band;

	IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band);

	return 0;
}

/**
 * iwl3945_check_rxon_cmd - validate RXON structure is valid
 *
 * NOTE:  This is really only useful during development and can eventually
 * be #ifdef'd out once the driver is stable and folks aren't actively
 * making changes
 */
static int iwl3945_check_rxon_cmd(struct iwl3945_rxon_cmd *rxon)
{
	int error = 0;
	int counter = 1;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		error |= le32_to_cpu(rxon->flags &
				(RXON_FLG_TGJ_NARROW_BAND_MSK |
				 RXON_FLG_RADAR_DETECT_MSK));
		if (error)
			IWL_WARNING("check 24G fields %d | %d\n",
				    counter++, error);
	} else {
		error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
				0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
		if (error)
			IWL_WARNING("check 52 fields %d | %d\n",
				    counter++, error);
		error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
		if (error)
			IWL_WARNING("check 52 CCK %d | %d\n",
				    counter++, error);
	}
	error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
	if (error)
		IWL_WARNING("check mac addr %d | %d\n", counter++, error);

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
		  ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
	if (error)
		IWL_WARNING("check basic rate %d | %d\n", counter++, error);

	error |= (le16_to_cpu(rxon->assoc_id) > 2007);
	if (error)
		IWL_WARNING("check assoc id %d | %d\n", counter++, error);

	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
	if (error)
		IWL_WARNING("check CCK and short slot %d | %d\n",
			    counter++, error);

	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
	if (error)
		IWL_WARNING("check CCK & auto detect %d | %d\n",
			    counter++, error);

	error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
	if (error)
		IWL_WARNING("check TGG and auto detect %d | %d\n",
			    counter++, error);

	if ((rxon->flags & RXON_FLG_DIS_DIV_MSK))
		error |= ((rxon->flags & (RXON_FLG_ANT_B_MSK |
				RXON_FLG_ANT_A_MSK)) == 0);
	if (error)
		IWL_WARNING("check antenna %d %d\n", counter++, error);

	if (error)
		IWL_WARNING("Tuning to channel %d\n",
			    le16_to_cpu(rxon->channel));

	if (error) {
		IWL_ERROR("Not a valid iwl3945_rxon_assoc_cmd field values\n");
		return -1;
	}
	return 0;
}

/**
 * iwl3945_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
static int iwl3945_full_rxon_required(struct iwl3945_priv *priv)
{

	/* These items are only settable from the full RXON command */
	if (!(iwl3945_is_associated(priv)) ||
	    compare_ether_addr(priv->staging_rxon.bssid_addr,
			       priv->active_rxon.bssid_addr) ||
	    compare_ether_addr(priv->staging_rxon.node_addr,
			       priv->active_rxon.node_addr) ||
	    compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
			       priv->active_rxon.wlap_bssid_addr) ||
	    (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
	    (priv->staging_rxon.channel != priv->active_rxon.channel) ||
	    (priv->staging_rxon.air_propagation !=
	     priv->active_rxon.air_propagation) ||
	    (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
		return 1;

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
	    (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
		return 1;

	/* Check if we are switching association toggle */
	if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
		(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
		return 1;

	return 0;
}

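/*
 * Send only the RXON_ASSOC subset (flags, filter_flags and basic rates) of
 * the staging RXON to the uCode, skipping the command entirely when none of
 * those fields differ from the active RXON.
 */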
static int iwl3945_send_rxon_assoc(struct iwl3945_priv *priv)
{
	int rc = 0;
	struct iwl_rx_packet *res = NULL;
	struct iwl3945_rxon_assoc_cmd rxon_assoc;
	struct iwl3945_host_cmd cmd = {
		.id = REPLY_RXON_ASSOC,
		.len = sizeof(rxon_assoc),
		.meta.flags = CMD_WANT_SKB,
		.data = &rxon_assoc,
	};
	const struct iwl3945_rxon_cmd *rxon1 = &priv->staging_rxon;
	const struct iwl3945_rxon_cmd *rxon2 = &priv->active_rxon;

	if ((rxon1->flags == rxon2->flags) &&
	    (rxon1->filter_flags == rxon2->filter_flags) &&
	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
		IWL_DEBUG_INFO("Using current RXON_ASSOC.  Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = priv->staging_rxon.flags;
	rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
	rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
	rxon_assoc.reserved = 0;

	rc = iwl3945_send_cmd_sync(priv, &cmd);
	if (rc)
		return rc;

	res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
		rc = -EIO;
	}

	priv->alloc_rxb_skb--;
	dev_kfree_skb_any(cmd.meta.u.skb);

	return rc;
}

/**
 * iwl3945_commit_rxon - commit staging_rxon to hardware
 *
 * The RXON command in staging_rxon is committed to the hardware and
 * the active_rxon structure is updated with the new data.  This
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 */
static int iwl3945_commit_rxon(struct iwl3945_priv *priv)
{
	/* cast away the const for active_rxon in this function */
	struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
	int rc = 0;

	if (!iwl3945_is_alive(priv))
		return -1;

	/* always get timestamp with Rx frame */
	priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;

	/* select antenna */
	priv->staging_rxon.flags &=
	    ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
	priv->staging_rxon.flags |= iwl3945_get_antenna_flags(priv);

	rc = iwl3945_check_rxon_cmd(&priv->staging_rxon);
	if (rc) {
		IWL_ERROR("Invalid RXON configuration.  Not committing.\n");
		return -EINVAL;
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl3945_full_rxon_required(priv)) {
		rc = iwl3945_send_rxon_assoc(priv);
		if (rc) {
			IWL_ERROR("Error setting RXON_ASSOC "
				  "configuration (%d).\n", rc);
			return rc;
		}

		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));

		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (iwl3945_is_associated(priv) &&
	    (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
		IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON,
				      sizeof(struct iwl3945_rxon_cmd),
				      &priv->active_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (rc) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERROR("Error clearing ASSOC_MSK on current "
				  "configuration (%d).\n", rc);
			return rc;
		}
	}

	IWL_DEBUG_INFO("Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %pM\n",
		       ((priv->staging_rxon.filter_flags &
			 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
		       le16_to_cpu(priv->staging_rxon.channel),
		       priv->staging_rxon.bssid_addr);

	/* Apply the new configuration */
	rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl3945_rxon_cmd), &priv->staging_rxon);
	if (rc) {
		IWL_ERROR("Error setting new configuration (%d).\n", rc);
		return rc;
	}

	memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));

	iwl3945_clear_stations_table(priv);

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	rc = iwl3945_hw_reg_send_txpower(priv);
	if (rc) {
		IWL_ERROR("Error setting Tx power (%d).\n", rc);
		return rc;
	}

	/* Add the broadcast address so we can send broadcast frames */
	if (iwl3945_add_station(priv, iwl3945_broadcast_addr, 0, 0) ==
	    IWL_INVALID_STATION) {
		IWL_ERROR("Error adding BROADCAST address for transmit.\n");
		return -EIO;
	}

	/* If we have set the ASSOC_MSK and we are in BSS mode then
	 * add the IWL_AP_ID to the station rate table */
	if (iwl3945_is_associated(priv) &&
	    (priv->iw_mode == NL80211_IFTYPE_STATION))
		if (iwl3945_add_station(priv, priv->active_rxon.bssid_addr, 1, 0)
		    == IWL_INVALID_STATION) {
			IWL_ERROR("Error adding AP address for transmit.\n");
			return -EIO;
		}

	/* Init the hardware's rate fallback order based on the band */
	rc = iwl3945_init_hw_rate_table(priv);
	if (rc) {
		IWL_ERROR("Error setting HW rate table: %02X\n", rc);
		return -EIO;
	}

	return 0;
}

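/* Send the Bluetooth coexistence configuration (lead time, kill masks) */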
static int iwl3945_send_bt_config(struct iwl3945_priv *priv)
{
	struct iwl_bt_cmd bt_cmd = {
		.flags = 3,
		.lead_time = 0xAA,
		.max_kill = 1,
		.kill_ack_mask = 0,
		.kill_cts_mask = 0,
	};

	return iwl3945_send_cmd_pdu(priv, REPLY_BT_CONFIG,
				sizeof(bt_cmd), &bt_cmd);
}

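/*
 * Ask the uCode to abort the scan currently running in the hardware.  If no
 * scan is active (e.g. between scan bands), just clear the aborting flag.
 */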
static int iwl3945_send_scan_abort(struct iwl3945_priv *priv)
{
	int rc = 0;
	struct iwl_rx_packet *res;
	struct iwl3945_host_cmd cmd = {
		.id = REPLY_SCAN_ABORT_CMD,
		.meta.flags = CMD_WANT_SKB,
	};

	/* If there isn't a scan actively going on in the hardware
	 * then we are in between scan bands and not actually
	 * actively scanning, so don't send the abort command */
	if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		return 0;
	}

	rc = iwl3945_send_cmd_sync(priv, &cmd);
	if (rc) {
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		return rc;
	}

	res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
	if (res->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		clear_bit(STATUS_SCAN_HW, &priv->status);
	}

	dev_kfree_skb_any(cmd.meta.u.skb);

	return rc;
}

static int iwl3945_card_state_sync_callback(struct iwl3945_priv *priv,
					struct iwl3945_cmd *cmd,
					struct sk_buff *skb)
{
	return 1;
}

/*
 * CARD_STATE_CMD
 *
 * Use: Sets the device's internal card state to enable, disable, or halt
 *
 * When in the 'enable' state the card operates as normal.
 * When in the 'disable' state, the card enters into a low power mode.
 * When in the 'halt' state, the card is shut down and must be fully
 * restarted to come back on.
 */
static int iwl3945_send_card_state(struct iwl3945_priv *priv, u32 flags, u8 meta_flag)
{
	struct iwl3945_host_cmd cmd = {
		.id = REPLY_CARD_STATE_CMD,
		.len = sizeof(u32),
		.data = &flags,
		.meta.flags = meta_flag,
	};

	if (meta_flag & CMD_ASYNC)
		cmd.meta.u.callback = iwl3945_card_state_sync_callback;

	return iwl3945_send_cmd(priv, &cmd);
}

static int iwl3945_add_sta_sync_callback(struct iwl3945_priv *priv,
				     struct iwl3945_cmd *cmd, struct sk_buff *skb)
{
	struct iwl_rx_packet *res = NULL;

	if (!skb) {
		IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
		return 1;
	}

	res = (struct iwl_rx_packet *)skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
			  res->hdr.flags);
		return 1;
	}

	switch (res->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		break;
	default:
		break;
	}

	/* We didn't cache the SKB; let the caller free it */
	return 1;
}

int iwl3945_send_add_station(struct iwl3945_priv *priv,
			 struct iwl3945_addsta_cmd *sta, u8 flags)
{
	struct iwl_rx_packet *res = NULL;
	int rc = 0;
	struct iwl3945_host_cmd cmd = {
		.id = REPLY_ADD_STA,
		.len = sizeof(struct iwl3945_addsta_cmd),
		.meta.flags = flags,
		.data = sta,
	};

	if (flags & CMD_ASYNC)
		cmd.meta.u.callback = iwl3945_add_sta_sync_callback;
	else
		cmd.meta.flags |= CMD_WANT_SKB;

	rc = iwl3945_send_cmd(priv, &cmd);

	if (rc || (flags & CMD_ASYNC))
		return rc;

	res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
	if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
			  res->hdr.flags);
		rc = -EIO;
	}

	if (rc == 0) {
		switch (res->u.add_sta.status) {
		case ADD_STA_SUCCESS_MSK:
			IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
			break;
		default:
			rc = -EIO;
			IWL_WARNING("REPLY_ADD_STA failed\n");
			break;
		}
	}

	priv->alloc_rxb_skb--;
	dev_kfree_skb_any(cmd.meta.u.skb);

	return rc;
}

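/*
 * Program a hardware key for the given station into both the driver's
 * station table and (via REPLY_ADD_STA) the uCode's station table.
 * Only CCMP keys are accepted here; TKIP and WEP return -EINVAL.
 */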
static int iwl3945_update_sta_key_info(struct iwl3945_priv *priv,
				   struct ieee80211_key_conf *keyconf,
				   u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;

	switch (keyconf->alg) {
	case ALG_CCMP:
		key_flags |= STA_KEY_FLG_CCMP;
		key_flags |= cpu_to_le16(
				keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
		key_flags &= ~STA_KEY_FLG_INVALID;
		break;
	case ALG_TKIP:
	case ALG_WEP:
	default:
		return -EINVAL;
	}
	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].keyinfo.alg = keyconf->alg;
	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
	       keyconf->keylen);

	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
	       keyconf->keylen);
	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
	iwl3945_send_add_station(priv, &priv->stations[sta_id].sta, 0);
	return 0;
}

Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001335static int iwl3945_clear_sta_key_info(struct iwl3945_priv *priv, u8 sta_id)
Zhu Yib481de92007-09-25 17:54:57 -07001336{
1337 unsigned long flags;
1338
1339 spin_lock_irqsave(&priv->sta_lock, flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001340 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl3945_hw_key));
Tomas Winkler4c897252008-12-19 10:37:05 +08001341 memset(&priv->stations[sta_id].sta.key, 0,
1342 sizeof(struct iwl4965_keyinfo));
Zhu Yib481de92007-09-25 17:54:57 -07001343 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1344 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1345 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1346 spin_unlock_irqrestore(&priv->sta_lock, flags);
1347
1348 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001349 iwl3945_send_add_station(priv, &priv->stations[sta_id].sta, 0);
Zhu Yib481de92007-09-25 17:54:57 -07001350 return 0;
1351}
1352
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001353static void iwl3945_clear_free_frames(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001354{
1355 struct list_head *element;
1356
1357 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
1358 priv->frames_count);
1359
1360 while (!list_empty(&priv->free_frames)) {
1361 element = priv->free_frames.next;
1362 list_del(element);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001363 kfree(list_entry(element, struct iwl3945_frame, list));
Zhu Yib481de92007-09-25 17:54:57 -07001364 priv->frames_count--;
1365 }
1366
1367 if (priv->frames_count) {
1368 IWL_WARNING("%d frames still in use. Did we lose one?\n",
1369 priv->frames_count);
1370 priv->frames_count = 0;
1371 }
1372}
1373
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001374static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001375{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001376 struct iwl3945_frame *frame;
Zhu Yib481de92007-09-25 17:54:57 -07001377 struct list_head *element;
1378 if (list_empty(&priv->free_frames)) {
1379 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1380 if (!frame) {
1381 IWL_ERROR("Could not allocate frame!\n");
1382 return NULL;
1383 }
1384
1385 priv->frames_count++;
1386 return frame;
1387 }
1388
1389 element = priv->free_frames.next;
1390 list_del(element);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001391 return list_entry(element, struct iwl3945_frame, list);
Zhu Yib481de92007-09-25 17:54:57 -07001392}
1393
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001394static void iwl3945_free_frame(struct iwl3945_priv *priv, struct iwl3945_frame *frame)
Zhu Yib481de92007-09-25 17:54:57 -07001395{
1396 memset(frame, 0, sizeof(*frame));
1397 list_add(&frame->list, &priv->free_frames);
1398}
1399
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001400unsigned int iwl3945_fill_beacon_frame(struct iwl3945_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -07001401 struct ieee80211_hdr *hdr,
Rami Rosen73ec1cc2008-12-16 09:37:07 +02001402 int left)
Zhu Yib481de92007-09-25 17:54:57 -07001403{
1404
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001405 if (!iwl3945_is_associated(priv) || !priv->ibss_beacon ||
Johannes Berg05c914f2008-09-11 00:01:58 +02001406 ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
1407 (priv->iw_mode != NL80211_IFTYPE_AP)))
Zhu Yib481de92007-09-25 17:54:57 -07001408 return 0;
1409
1410 if (priv->ibss_beacon->len > left)
1411 return 0;
1412
1413 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1414
1415 return priv->ibss_beacon->len;
1416}
1417
Kolekar, Abhijeetc24f0812008-11-07 09:58:44 -08001418static u8 iwl3945_rate_get_lowest_plcp(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001419{
1420 u8 i;
Kolekar, Abhijeetc24f0812008-11-07 09:58:44 -08001421 int rate_mask;
1422
 1423	/* Set rate mask */
1424 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
Chatre, Reinettedbce56a2008-11-12 13:14:07 -08001425 rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
Kolekar, Abhijeetc24f0812008-11-07 09:58:44 -08001426 else
Chatre, Reinettedbce56a2008-11-12 13:14:07 -08001427 rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
Zhu Yib481de92007-09-25 17:54:57 -07001428
1429 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001430 i = iwl3945_rates[i].next_ieee) {
Zhu Yib481de92007-09-25 17:54:57 -07001431 if (rate_mask & (1 << i))
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001432 return iwl3945_rates[i].plcp;
Zhu Yib481de92007-09-25 17:54:57 -07001433 }
1434
Kolekar, Abhijeetc24f0812008-11-07 09:58:44 -08001435 /* No valid rate was found. Assign the lowest one */
1436 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
1437 return IWL_RATE_1M_PLCP;
1438 else
1439 return IWL_RATE_6M_PLCP;
Zhu Yib481de92007-09-25 17:54:57 -07001440}
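/*
 * Added note: the loop above starts at IWL_RATE_1M_INDEX and follows the
 * next_ieee links through the rate table, so the first set bit it meets in
 * rate_mask is the lowest configured basic rate for the current band; the
 * band-dependent fallback (1M PLCP on 2.4 GHz, 6M PLCP on 5.2 GHz) is only
 * used when the mask turned out to be empty.
 */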
1441
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001442static int iwl3945_send_beacon_cmd(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001443{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001444 struct iwl3945_frame *frame;
Zhu Yib481de92007-09-25 17:54:57 -07001445 unsigned int frame_size;
1446 int rc;
1447 u8 rate;
1448
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001449 frame = iwl3945_get_free_frame(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001450
1451 if (!frame) {
1452 IWL_ERROR("Could not obtain free frame buffer for beacon "
1453 "command.\n");
1454 return -ENOMEM;
1455 }
1456
Kolekar, Abhijeetc24f0812008-11-07 09:58:44 -08001457 rate = iwl3945_rate_get_lowest_plcp(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001458
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001459 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
Zhu Yib481de92007-09-25 17:54:57 -07001460
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001461 rc = iwl3945_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
Zhu Yib481de92007-09-25 17:54:57 -07001462 &frame->u.cmd[0]);
1463
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001464 iwl3945_free_frame(priv, frame);
Zhu Yib481de92007-09-25 17:54:57 -07001465
1466 return rc;
1467}
1468
1469/******************************************************************************
1470 *
1471 * EEPROM related functions
1472 *
1473 ******************************************************************************/
1474
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001475static void get_eeprom_mac(struct iwl3945_priv *priv, u8 *mac)
Zhu Yib481de92007-09-25 17:54:57 -07001476{
1477 memcpy(mac, priv->eeprom.mac_address, 6);
1478}
1479
Reinette Chatre74a3a252008-01-23 10:15:19 -08001480/*
1481 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
1482 * embedded controller) as EEPROM reader; each read is a series of pulses
1483 * to/from the EEPROM chip, not a single event, so even reads could conflict
1484 * if they weren't arbitrated by some ownership mechanism. Here, the driver
1485 * simply claims ownership, which should be safe when this function is called
1486 * (i.e. before loading uCode!).
1487 */
1488static inline int iwl3945_eeprom_acquire_semaphore(struct iwl3945_priv *priv)
1489{
1490 _iwl3945_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
1491 return 0;
1492}
1493
Zhu Yib481de92007-09-25 17:54:57 -07001494/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001495 * iwl3945_eeprom_init - read EEPROM contents
Zhu Yib481de92007-09-25 17:54:57 -07001496 *
Cahill, Ben M6440adb2007-11-29 11:09:55 +08001497 * Load the EEPROM contents from adapter into priv->eeprom
Zhu Yib481de92007-09-25 17:54:57 -07001498 *
1499 * NOTE: This routine uses the non-debug IO access functions.
1500 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001501int iwl3945_eeprom_init(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001502{
Tomas Winkler58ff6d42008-02-13 02:47:54 +02001503 u16 *e = (u16 *)&priv->eeprom;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001504 u32 gp = iwl3945_read32(priv, CSR_EEPROM_GP);
Zhu Yib481de92007-09-25 17:54:57 -07001505 int sz = sizeof(priv->eeprom);
Zhu, Yi3d5717a2008-12-11 10:33:36 -08001506 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001507 u16 addr;
1508
 1509	/* The EEPROM structure has several padding buffers within it,
 1510	 * and adding new EEPROM maps is subject to programmer errors
1511 * which may be very difficult to identify without explicitly
1512 * checking the resulting size of the eeprom map. */
1513 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1514
1515 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
Jiri Slaby6f147922008-08-11 23:49:41 +02001516 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
Zhu Yib481de92007-09-25 17:54:57 -07001517 return -ENOENT;
1518 }
1519
Cahill, Ben M6440adb2007-11-29 11:09:55 +08001520 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
Zhu, Yi3d5717a2008-12-11 10:33:36 -08001521 ret = iwl3945_eeprom_acquire_semaphore(priv);
1522 if (ret < 0) {
Ian Schram91e17472007-10-25 17:15:23 +08001523 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
Zhu Yib481de92007-09-25 17:54:57 -07001524 return -ENOENT;
1525 }
1526
1527 /* eeprom is an array of 16bit values */
1528 for (addr = 0; addr < sz; addr += sizeof(u16)) {
Zhu, Yi3d5717a2008-12-11 10:33:36 -08001529 u32 r;
1530
1531 _iwl3945_write32(priv, CSR_EEPROM_REG,
1532 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001533 _iwl3945_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
Zhu, Yi3d5717a2008-12-11 10:33:36 -08001534 ret = iwl3945_poll_direct_bit(priv, CSR_EEPROM_REG,
1535 CSR_EEPROM_REG_READ_VALID_MSK,
1536 IWL_EEPROM_ACCESS_TIMEOUT);
1537 if (ret < 0) {
Jiri Slaby6f147922008-08-11 23:49:41 +02001538 IWL_ERROR("Time out reading EEPROM[%d]\n", addr);
Zhu, Yi3d5717a2008-12-11 10:33:36 -08001539 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001540 }
Zhu, Yi3d5717a2008-12-11 10:33:36 -08001541
1542 r = _iwl3945_read_direct32(priv, CSR_EEPROM_REG);
Tomas Winkler58ff6d42008-02-13 02:47:54 +02001543 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
Zhu Yib481de92007-09-25 17:54:57 -07001544 }
1545
1546 return 0;
1547}
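/*
 * Illustrative sketch (added, not driver code; the helper name is made up)
 * of the word packing done in the loop above: each CSR_EEPROM_REG read
 * carries the 16-bit EEPROM word in the upper half of the 32-bit value,
 * and the conversion stores it so that the byte image in priv->eeprom
 * keeps the EEPROM's little-endian layout on both little- and big-endian
 * hosts.
 */
#if 0
static void example_eeprom_store_word(u8 *image, u16 addr, u32 csr_val)
{
	u16 word = (u16)(csr_val >> 16);	/* data sits in the upper half */

	image[addr]     = word & 0xff;		/* low byte first ...          */
	image[addr + 1] = word >> 8;		/* ... little-endian image     */
}
#endif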
1548
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001549static void iwl3945_unset_hw_setting(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001550{
1551 if (priv->hw_setting.shared_virt)
1552 pci_free_consistent(priv->pci_dev,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001553 sizeof(struct iwl3945_shared),
Zhu Yib481de92007-09-25 17:54:57 -07001554 priv->hw_setting.shared_virt,
1555 priv->hw_setting.shared_phys);
1556}
1557
1558/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001559 * iwl3945_supported_rate_to_ie - fill in the supported rate in IE field
Zhu Yib481de92007-09-25 17:54:57 -07001560 *
 1561	 * return: bitmap with a bit set for each supported rate inserted in the IE
1562 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001563static u16 iwl3945_supported_rate_to_ie(u8 *ie, u16 supported_rate,
Tomas Winklerc7c46672007-10-18 02:04:15 +02001564 u16 basic_rate, int *left)
Zhu Yib481de92007-09-25 17:54:57 -07001565{
1566 u16 ret_rates = 0, bit;
1567 int i;
Tomas Winklerc7c46672007-10-18 02:04:15 +02001568 u8 *cnt = ie;
1569 u8 *rates = ie + 1;
Zhu Yib481de92007-09-25 17:54:57 -07001570
1571 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1572 if (bit & supported_rate) {
1573 ret_rates |= bit;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001574 rates[*cnt] = iwl3945_rates[i].ieee |
Tomas Winklerc7c46672007-10-18 02:04:15 +02001575 ((bit & basic_rate) ? 0x80 : 0x00);
1576 (*cnt)++;
1577 (*left)--;
1578 if ((*left <= 0) ||
1579 (*cnt >= IWL_SUPPORTED_RATES_IE_LEN))
Zhu Yib481de92007-09-25 17:54:57 -07001580 break;
1581 }
1582 }
1583
1584 return ret_rates;
1585}
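/*
 * Added note on the resulting IE bytes: the .ieee value is the rate in
 * 500 kbps units, so for example a basic 1 Mbps rate is emitted as 0x82
 * (0x02 | 0x80) and a non-basic 54 Mbps rate as 0x6c, matching the 802.11
 * Supported Rates element encoding.
 */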
1586
1587/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001588 * iwl3945_fill_probe_req - fill in all required fields and IE for probe request
Zhu Yib481de92007-09-25 17:54:57 -07001589 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001590static u16 iwl3945_fill_probe_req(struct iwl3945_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -07001591 struct ieee80211_mgmt *frame,
Johannes Berg430cfe92008-10-28 18:06:02 +01001592 int left)
Zhu Yib481de92007-09-25 17:54:57 -07001593{
1594 int len = 0;
1595 u8 *pos = NULL;
Tomas Winklerc7c46672007-10-18 02:04:15 +02001596 u16 active_rates, ret_rates, cck_rates;
Zhu Yib481de92007-09-25 17:54:57 -07001597
1598 /* Make sure there is enough space for the probe request,
1599 * two mandatory IEs and the data */
1600 left -= 24;
1601 if (left < 0)
1602 return 0;
1603 len += 24;
1604
1605 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001606 memcpy(frame->da, iwl3945_broadcast_addr, ETH_ALEN);
Zhu Yib481de92007-09-25 17:54:57 -07001607 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001608 memcpy(frame->bssid, iwl3945_broadcast_addr, ETH_ALEN);
Zhu Yib481de92007-09-25 17:54:57 -07001609 frame->seq_ctrl = 0;
1610
1611 /* fill in our indirect SSID IE */
1612 /* ...next IE... */
1613
1614 left -= 2;
1615 if (left < 0)
1616 return 0;
1617 len += 2;
1618 pos = &(frame->u.probe_req.variable[0]);
1619 *pos++ = WLAN_EID_SSID;
1620 *pos++ = 0;
1621
Zhu Yib481de92007-09-25 17:54:57 -07001622 /* fill in supported rate */
1623 /* ...next IE... */
1624 left -= 2;
1625 if (left < 0)
1626 return 0;
Tomas Winklerc7c46672007-10-18 02:04:15 +02001627
Zhu Yib481de92007-09-25 17:54:57 -07001628 /* ... fill it in... */
1629 *pos++ = WLAN_EID_SUPP_RATES;
1630 *pos = 0;
Tomas Winklerc7c46672007-10-18 02:04:15 +02001631
1632 priv->active_rate = priv->rates_mask;
1633 active_rates = priv->active_rate;
Zhu Yib481de92007-09-25 17:54:57 -07001634 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
1635
Tomas Winklerc7c46672007-10-18 02:04:15 +02001636 cck_rates = IWL_CCK_RATES_MASK & active_rates;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001637 ret_rates = iwl3945_supported_rate_to_ie(pos, cck_rates,
Tomas Winklerc7c46672007-10-18 02:04:15 +02001638 priv->active_rate_basic, &left);
1639 active_rates &= ~ret_rates;
1640
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001641 ret_rates = iwl3945_supported_rate_to_ie(pos, active_rates,
Tomas Winklerc7c46672007-10-18 02:04:15 +02001642 priv->active_rate_basic, &left);
1643 active_rates &= ~ret_rates;
1644
Zhu Yib481de92007-09-25 17:54:57 -07001645 len += 2 + *pos;
1646 pos += (*pos) + 1;
Tomas Winklerc7c46672007-10-18 02:04:15 +02001647 if (active_rates == 0)
Zhu Yib481de92007-09-25 17:54:57 -07001648 goto fill_end;
1649
1650 /* fill in supported extended rate */
1651 /* ...next IE... */
1652 left -= 2;
1653 if (left < 0)
1654 return 0;
1655 /* ... fill it in... */
1656 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1657 *pos = 0;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001658 iwl3945_supported_rate_to_ie(pos, active_rates,
Tomas Winklerc7c46672007-10-18 02:04:15 +02001659 priv->active_rate_basic, &left);
Zhu Yib481de92007-09-25 17:54:57 -07001660 if (*pos > 0)
1661 len += 2 + *pos;
1662
1663 fill_end:
1664 return (u16)len;
1665}
1666
1667/*
1668 * QoS support
1669*/
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001670static int iwl3945_send_qos_params_command(struct iwl3945_priv *priv,
Tomas Winkler4c897252008-12-19 10:37:05 +08001671 struct iwl_qosparam_cmd *qos)
Zhu Yib481de92007-09-25 17:54:57 -07001672{
1673
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001674 return iwl3945_send_cmd_pdu(priv, REPLY_QOS_PARAM,
Tomas Winkler4c897252008-12-19 10:37:05 +08001675 sizeof(struct iwl_qosparam_cmd), qos);
Zhu Yib481de92007-09-25 17:54:57 -07001676}
1677
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001678static void iwl3945_reset_qos(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001679{
1680 u16 cw_min = 15;
1681 u16 cw_max = 1023;
1682 u8 aifs = 2;
1683 u8 is_legacy = 0;
1684 unsigned long flags;
1685 int i;
1686
1687 spin_lock_irqsave(&priv->lock, flags);
1688 priv->qos_data.qos_active = 0;
1689
Winkler, Tomas6d1ef1a2008-12-09 11:29:00 -08001690 /* QoS always active in AP and ADHOC mode
1691 * In STA mode wait for association
1692 */
1693 if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
1694 priv->iw_mode == NL80211_IFTYPE_AP)
1695 priv->qos_data.qos_active = 1;
1696 else
1697 priv->qos_data.qos_active = 0;
1698
1699
1700 /* check for legacy mode */
1701 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
1702 (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
1703 (priv->iw_mode == NL80211_IFTYPE_STATION &&
1704 (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
Zhu Yib481de92007-09-25 17:54:57 -07001705 cw_min = 31;
1706 is_legacy = 1;
1707 }
1708
1709 if (priv->qos_data.qos_active)
1710 aifs = 3;
1711
1712 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
1713 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
1714 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
1715 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
1716 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
1717
1718 if (priv->qos_data.qos_active) {
1719 i = 1;
1720 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
1721 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
1722 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
1723 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
1724 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1725
1726 i = 2;
1727 priv->qos_data.def_qos_parm.ac[i].cw_min =
1728 cpu_to_le16((cw_min + 1) / 2 - 1);
1729 priv->qos_data.def_qos_parm.ac[i].cw_max =
1730 cpu_to_le16(cw_max);
1731 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
1732 if (is_legacy)
1733 priv->qos_data.def_qos_parm.ac[i].edca_txop =
1734 cpu_to_le16(6016);
1735 else
1736 priv->qos_data.def_qos_parm.ac[i].edca_txop =
1737 cpu_to_le16(3008);
1738 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1739
1740 i = 3;
1741 priv->qos_data.def_qos_parm.ac[i].cw_min =
1742 cpu_to_le16((cw_min + 1) / 4 - 1);
1743 priv->qos_data.def_qos_parm.ac[i].cw_max =
1744 cpu_to_le16((cw_max + 1) / 2 - 1);
1745 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
1746 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1747 if (is_legacy)
1748 priv->qos_data.def_qos_parm.ac[i].edca_txop =
1749 cpu_to_le16(3264);
1750 else
1751 priv->qos_data.def_qos_parm.ac[i].edca_txop =
1752 cpu_to_le16(1504);
1753 } else {
1754 for (i = 1; i < 4; i++) {
1755 priv->qos_data.def_qos_parm.ac[i].cw_min =
1756 cpu_to_le16(cw_min);
1757 priv->qos_data.def_qos_parm.ac[i].cw_max =
1758 cpu_to_le16(cw_max);
1759 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
1760 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
1761 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1762 }
1763 }
1764 IWL_DEBUG_QOS("set QoS to default \n");
1765
1766 spin_unlock_irqrestore(&priv->lock, flags);
1767}
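/*
 * Worked example of the derivation above (added note): with QoS active,
 * no legacy fallback, cw_min = 15 and cw_max = 1023, the four access
 * categories end up with cw_min 15/15/7/3, cw_max 1023/1023/1023/511 and
 * aifsn 3/7/2/2; each "higher priority" queue shrinks its contention
 * window via (cw + 1)/n - 1.  The TXOP limits of 6016/3008 us (AC 2) and
 * 3264/1504 us (AC 3) are the legacy (long-slot) vs. ERP variants selected
 * by is_legacy.
 */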
1768
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001769static void iwl3945_activate_qos(struct iwl3945_priv *priv, u8 force)
Zhu Yib481de92007-09-25 17:54:57 -07001770{
1771 unsigned long flags;
1772
Zhu Yib481de92007-09-25 17:54:57 -07001773 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1774 return;
1775
Zhu Yib481de92007-09-25 17:54:57 -07001776 spin_lock_irqsave(&priv->lock, flags);
1777 priv->qos_data.def_qos_parm.qos_flags = 0;
1778
1779 if (priv->qos_data.qos_cap.q_AP.queue_request &&
1780 !priv->qos_data.qos_cap.q_AP.txop_request)
1781 priv->qos_data.def_qos_parm.qos_flags |=
1782 QOS_PARAM_FLG_TXOP_TYPE_MSK;
1783
1784 if (priv->qos_data.qos_active)
1785 priv->qos_data.def_qos_parm.qos_flags |=
1786 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
1787
1788 spin_unlock_irqrestore(&priv->lock, flags);
1789
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001790 if (force || iwl3945_is_associated(priv)) {
Tomas Winklera96a27f2008-10-23 23:48:56 -07001791 IWL_DEBUG_QOS("send QoS cmd with QoS active %d \n",
Zhu Yib481de92007-09-25 17:54:57 -07001792 priv->qos_data.qos_active);
1793
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001794 iwl3945_send_qos_params_command(priv,
Zhu Yib481de92007-09-25 17:54:57 -07001795 &(priv->qos_data.def_qos_parm));
1796 }
1797}
1798
Zhu Yib481de92007-09-25 17:54:57 -07001799/*
1800 * Power management (not Tx power!) functions
1801 */
1802#define MSEC_TO_USEC 1024
1803
Tomas Winkler600c0e12008-12-19 10:37:04 +08001804
1805#define NOSLP __constant_cpu_to_le16(0), 0, 0
1806#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
Zhu Yib481de92007-09-25 17:54:57 -07001807#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
1808#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
1809 __constant_cpu_to_le32(X1), \
1810 __constant_cpu_to_le32(X2), \
1811 __constant_cpu_to_le32(X3), \
1812 __constant_cpu_to_le32(X4)}
1813
Zhu Yib481de92007-09-25 17:54:57 -07001814/* default power management (not Tx power) table values */
Tomas Winklera96a27f2008-10-23 23:48:56 -07001815/* for TIM 0-10 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001816static struct iwl3945_power_vec_entry range_0[IWL_POWER_AC] = {
Zhu Yib481de92007-09-25 17:54:57 -07001817 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1818 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
1819 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
1820 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
1821 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
1822 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
1823};
1824
Tomas Winklera96a27f2008-10-23 23:48:56 -07001825/* for TIM > 10 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001826static struct iwl3945_power_vec_entry range_1[IWL_POWER_AC] = {
Zhu Yib481de92007-09-25 17:54:57 -07001827 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1828 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
1829 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
1830 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
1831 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
1832 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
1833 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
1834 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
1835 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
1836 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
1837};
1838
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001839int iwl3945_power_init_handle(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001840{
1841 int rc = 0, i;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001842 struct iwl3945_power_mgr *pow_data;
1843 int size = sizeof(struct iwl3945_power_vec_entry) * IWL_POWER_AC;
Zhu Yib481de92007-09-25 17:54:57 -07001844 u16 pci_pm;
1845
1846 IWL_DEBUG_POWER("Initialize power \n");
1847
1848 pow_data = &(priv->power_data);
1849
1850 memset(pow_data, 0, sizeof(*pow_data));
1851
1852 pow_data->active_index = IWL_POWER_RANGE_0;
1853 pow_data->dtim_val = 0xffff;
1854
1855 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
1856 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
1857
1858 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
1859 if (rc != 0)
1860 return 0;
1861 else {
Tomas Winkler600c0e12008-12-19 10:37:04 +08001862 struct iwl_powertable_cmd *cmd;
Zhu Yib481de92007-09-25 17:54:57 -07001863
1864 IWL_DEBUG_POWER("adjust power command flags\n");
1865
1866 for (i = 0; i < IWL_POWER_AC; i++) {
1867 cmd = &pow_data->pwr_range_0[i].cmd;
1868
1869 if (pci_pm & 0x1)
1870 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
1871 else
1872 cmd->flags |= IWL_POWER_PCI_PM_MSK;
1873 }
1874 }
1875 return rc;
1876}
1877
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001878static int iwl3945_update_power_cmd(struct iwl3945_priv *priv,
Tomas Winkler600c0e12008-12-19 10:37:04 +08001879 struct iwl_powertable_cmd *cmd, u32 mode)
Zhu Yib481de92007-09-25 17:54:57 -07001880{
1881 int rc = 0, i;
1882 u8 skip;
1883 u32 max_sleep = 0;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001884 struct iwl3945_power_vec_entry *range;
Zhu Yib481de92007-09-25 17:54:57 -07001885 u8 period = 0;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001886 struct iwl3945_power_mgr *pow_data;
Zhu Yib481de92007-09-25 17:54:57 -07001887
1888 if (mode > IWL_POWER_INDEX_5) {
1889 IWL_DEBUG_POWER("Error invalid power mode \n");
1890 return -1;
1891 }
1892 pow_data = &(priv->power_data);
1893
1894 if (pow_data->active_index == IWL_POWER_RANGE_0)
1895 range = &pow_data->pwr_range_0[0];
1896 else
1897 range = &pow_data->pwr_range_1[1];
1898
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001899 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl3945_powertable_cmd));
Zhu Yib481de92007-09-25 17:54:57 -07001900
1901#ifdef IWL_MAC80211_DISABLE
1902 if (priv->assoc_network != NULL) {
1903 unsigned long flags;
1904
1905 period = priv->assoc_network->tim.tim_period;
1906 }
1907#endif /*IWL_MAC80211_DISABLE */
1908 skip = range[mode].no_dtim;
1909
1910 if (period == 0) {
1911 period = 1;
1912 skip = 0;
1913 }
1914
1915 if (skip == 0) {
1916 max_sleep = period;
1917 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
1918 } else {
1919 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
1920 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
1921 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
1922 }
1923
1924 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
1925 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
1926 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
1927 }
1928
1929 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
1930 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
1931 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
1932 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
1933 le32_to_cpu(cmd->sleep_interval[0]),
1934 le32_to_cpu(cmd->sleep_interval[1]),
1935 le32_to_cpu(cmd->sleep_interval[2]),
1936 le32_to_cpu(cmd->sleep_interval[3]),
1937 le32_to_cpu(cmd->sleep_interval[4]));
1938
1939 return rc;
1940}
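/*
 * Worked example (added note): with a DTIM period of 3 and a largest
 * sleep_interval table entry of 10 beacon intervals, the no_dtim case
 * gives max_sleep = (10 / 3) * 3 = 9, so every vector entry is clamped to
 * at most 9; without no_dtim, max_sleep is simply the DTIM period and the
 * SLEEP_OVER_DTIM flag stays cleared so the device wakes for every DTIM.
 */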
1941
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001942static int iwl3945_send_power_mode(struct iwl3945_priv *priv, u32 mode)
Zhu Yib481de92007-09-25 17:54:57 -07001943{
John W. Linville9a62f732007-11-15 16:27:36 -05001944 u32 uninitialized_var(final_mode);
Zhu Yib481de92007-09-25 17:54:57 -07001945 int rc;
Tomas Winkler600c0e12008-12-19 10:37:04 +08001946 struct iwl_powertable_cmd cmd;
Zhu Yib481de92007-09-25 17:54:57 -07001947
1948 /* If on battery, set to 3,
Ian Schram01ebd062007-10-25 17:15:22 +08001949 * if plugged into AC power, set to CAM ("continuously aware mode"),
Zhu Yib481de92007-09-25 17:54:57 -07001950 * else user level */
1951 switch (mode) {
1952 case IWL_POWER_BATTERY:
1953 final_mode = IWL_POWER_INDEX_3;
1954 break;
1955 case IWL_POWER_AC:
1956 final_mode = IWL_POWER_MODE_CAM;
1957 break;
1958 default:
1959 final_mode = mode;
1960 break;
1961 }
1962
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001963 iwl3945_update_power_cmd(priv, &cmd, final_mode);
Zhu Yib481de92007-09-25 17:54:57 -07001964
Tomas Winkler600c0e12008-12-19 10:37:04 +08001965 /* FIXME use get_hcmd_size 3945 command is 4 bytes shorter */
1966 rc = iwl3945_send_cmd_pdu(priv, POWER_TABLE_CMD,
1967 sizeof(struct iwl3945_powertable_cmd), &cmd);
Zhu Yib481de92007-09-25 17:54:57 -07001968
1969 if (final_mode == IWL_POWER_MODE_CAM)
1970 clear_bit(STATUS_POWER_PMI, &priv->status);
1971 else
1972 set_bit(STATUS_POWER_PMI, &priv->status);
1973
1974 return rc;
1975}
1976
Zhu Yib481de92007-09-25 17:54:57 -07001977/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001978 * iwl3945_scan_cancel - Cancel any currently executing HW scan
Zhu Yib481de92007-09-25 17:54:57 -07001979 *
1980 * NOTE: priv->mutex is not required before calling this function
1981 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001982static int iwl3945_scan_cancel(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001983{
1984 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1985 clear_bit(STATUS_SCANNING, &priv->status);
1986 return 0;
1987 }
1988
1989 if (test_bit(STATUS_SCANNING, &priv->status)) {
1990 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1991 IWL_DEBUG_SCAN("Queuing scan abort.\n");
1992 set_bit(STATUS_SCAN_ABORTING, &priv->status);
1993 queue_work(priv->workqueue, &priv->abort_scan);
1994
1995 } else
1996 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
1997
1998 return test_bit(STATUS_SCANNING, &priv->status);
1999 }
2000
2001 return 0;
2002}
2003
2004/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002005 * iwl3945_scan_cancel_timeout - Cancel HW scan and wait for it to complete
Zhu Yib481de92007-09-25 17:54:57 -07002006 * @ms: amount of time to wait (in milliseconds) for scan to abort
2007 *
2008 * NOTE: priv->mutex must be held before calling this function
2009 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002010static int iwl3945_scan_cancel_timeout(struct iwl3945_priv *priv, unsigned long ms)
Zhu Yib481de92007-09-25 17:54:57 -07002011{
2012 unsigned long now = jiffies;
2013 int ret;
2014
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002015 ret = iwl3945_scan_cancel(priv);
Zhu Yib481de92007-09-25 17:54:57 -07002016 if (ret && ms) {
2017 mutex_unlock(&priv->mutex);
2018 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2019 test_bit(STATUS_SCANNING, &priv->status))
2020 msleep(1);
2021 mutex_lock(&priv->mutex);
2022
2023 return test_bit(STATUS_SCANNING, &priv->status);
2024 }
2025
2026 return ret;
2027}
2028
Zhu Yib481de92007-09-25 17:54:57 -07002029#define MAX_UCODE_BEACON_INTERVAL 1024
2030#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
2031
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002032static __le16 iwl3945_adjust_beacon_interval(u16 beacon_val)
Zhu Yib481de92007-09-25 17:54:57 -07002033{
2034 u16 new_val = 0;
2035 u16 beacon_factor = 0;
2036
2037 beacon_factor =
2038 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
2039 / MAX_UCODE_BEACON_INTERVAL;
2040 new_val = beacon_val / beacon_factor;
2041
2042 return cpu_to_le16(new_val);
2043}
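/*
 * Worked example (added note): a host beacon interval of 3000 TU gives
 * beacon_factor = (3000 + 1024) / 1024 = 3, so the value handed to the
 * uCode becomes 3000 / 3 = 1000 TU; the interval is divided down by an
 * integer factor until it fits within MAX_UCODE_BEACON_INTERVAL.
 */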
2044
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002045static void iwl3945_setup_rxon_timing(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002046{
2047 u64 interval_tm_unit;
2048 u64 tsf, result;
2049 unsigned long flags;
2050 struct ieee80211_conf *conf = NULL;
2051 u16 beacon_int = 0;
2052
2053 conf = ieee80211_get_hw_conf(priv->hw);
2054
2055 spin_lock_irqsave(&priv->lock, flags);
Tomas Winkler28afaf92008-12-19 10:37:06 +08002056 priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
Zhu Yib481de92007-09-25 17:54:57 -07002057 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2058
Tomas Winkler28afaf92008-12-19 10:37:06 +08002059 tsf = priv->timestamp;
Zhu Yib481de92007-09-25 17:54:57 -07002060
2061 beacon_int = priv->beacon_int;
2062 spin_unlock_irqrestore(&priv->lock, flags);
2063
Johannes Berg05c914f2008-09-11 00:01:58 +02002064 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
Zhu Yib481de92007-09-25 17:54:57 -07002065 if (beacon_int == 0) {
2066 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2067 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
2068 } else {
2069 priv->rxon_timing.beacon_interval =
2070 cpu_to_le16(beacon_int);
2071 priv->rxon_timing.beacon_interval =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002072 iwl3945_adjust_beacon_interval(
Zhu Yib481de92007-09-25 17:54:57 -07002073 le16_to_cpu(priv->rxon_timing.beacon_interval));
2074 }
2075
2076 priv->rxon_timing.atim_window = 0;
2077 } else {
2078 priv->rxon_timing.beacon_interval =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002079 iwl3945_adjust_beacon_interval(conf->beacon_int);
Zhu Yib481de92007-09-25 17:54:57 -07002080 /* TODO: we need to get atim_window from upper stack
2081 * for now we set to 0 */
2082 priv->rxon_timing.atim_window = 0;
2083 }
2084
2085 interval_tm_unit =
2086 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
2087 result = do_div(tsf, interval_tm_unit);
2088 priv->rxon_timing.beacon_init_val =
2089 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2090
2091 IWL_DEBUG_ASSOC
2092 ("beacon interval %d beacon timer %d beacon tim %d\n",
2093 le16_to_cpu(priv->rxon_timing.beacon_interval),
2094 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2095 le16_to_cpu(priv->rxon_timing.atim_window));
2096}
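/*
 * Added note on the arithmetic above: interval_tm_unit is the beacon
 * interval converted to microseconds (x 1024), and do_div() leaves the
 * quotient in tsf while returning the remainder, so "result" is the
 * current TSF offset inside a beacon interval and beacon_init_val is the
 * time remaining until the next beacon boundary.
 */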
2097
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002098static int iwl3945_scan_initiate(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002099{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002100 if (!iwl3945_is_ready_rf(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07002101 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2102 return -EIO;
2103 }
2104
2105 if (test_bit(STATUS_SCANNING, &priv->status)) {
2106 IWL_DEBUG_SCAN("Scan already in progress.\n");
2107 return -EAGAIN;
2108 }
2109
2110 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2111 IWL_DEBUG_SCAN("Scan request while abort pending. "
2112 "Queuing.\n");
2113 return -EAGAIN;
2114 }
2115
2116 IWL_DEBUG_INFO("Starting scan...\n");
Ron Rindjunsky66b50042008-06-25 16:46:31 +08002117 if (priv->cfg->sku & IWL_SKU_G)
2118 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
2119 if (priv->cfg->sku & IWL_SKU_A)
2120 priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
Zhu Yib481de92007-09-25 17:54:57 -07002121 set_bit(STATUS_SCANNING, &priv->status);
2122 priv->scan_start = jiffies;
2123 priv->scan_pass_start = priv->scan_start;
2124
2125 queue_work(priv->workqueue, &priv->request_scan);
2126
2127 return 0;
2128}
2129
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002130static int iwl3945_set_rxon_hwcrypto(struct iwl3945_priv *priv, int hw_decrypt)
Zhu Yib481de92007-09-25 17:54:57 -07002131{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002132 struct iwl3945_rxon_cmd *rxon = &priv->staging_rxon;
Zhu Yib481de92007-09-25 17:54:57 -07002133
2134 if (hw_decrypt)
2135 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2136 else
2137 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2138
2139 return 0;
2140}
2141
Johannes Berg8318d782008-01-24 19:38:38 +01002142static void iwl3945_set_flags_for_phymode(struct iwl3945_priv *priv,
2143 enum ieee80211_band band)
Zhu Yib481de92007-09-25 17:54:57 -07002144{
Johannes Berg8318d782008-01-24 19:38:38 +01002145 if (band == IEEE80211_BAND_5GHZ) {
Zhu Yib481de92007-09-25 17:54:57 -07002146 priv->staging_rxon.flags &=
2147 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2148 | RXON_FLG_CCK_MSK);
2149 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2150 } else {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002151 /* Copied from iwl3945_bg_post_associate() */
Zhu Yib481de92007-09-25 17:54:57 -07002152 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2153 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2154 else
2155 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2156
Johannes Berg05c914f2008-09-11 00:01:58 +02002157 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
Zhu Yib481de92007-09-25 17:54:57 -07002158 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2159
2160 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2161 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2162 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
2163 }
2164}
2165
2166/*
Ian Schram01ebd062007-10-25 17:15:22 +08002167 * initialize rxon structure with default values from eeprom
Zhu Yib481de92007-09-25 17:54:57 -07002168 */
Zhu, Yi60294de2008-10-29 14:05:45 -07002169static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv,
2170 int mode)
Zhu Yib481de92007-09-25 17:54:57 -07002171{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002172 const struct iwl3945_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07002173
2174 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2175
Zhu, Yi60294de2008-10-29 14:05:45 -07002176 switch (mode) {
Johannes Berg05c914f2008-09-11 00:01:58 +02002177 case NL80211_IFTYPE_AP:
Zhu Yib481de92007-09-25 17:54:57 -07002178 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2179 break;
2180
Johannes Berg05c914f2008-09-11 00:01:58 +02002181 case NL80211_IFTYPE_STATION:
Zhu Yib481de92007-09-25 17:54:57 -07002182 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2183 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2184 break;
2185
Johannes Berg05c914f2008-09-11 00:01:58 +02002186 case NL80211_IFTYPE_ADHOC:
Zhu Yib481de92007-09-25 17:54:57 -07002187 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2188 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2189 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2190 RXON_FILTER_ACCEPT_GRP_MSK;
2191 break;
2192
Johannes Berg05c914f2008-09-11 00:01:58 +02002193 case NL80211_IFTYPE_MONITOR:
Zhu Yib481de92007-09-25 17:54:57 -07002194 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2195 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2196 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2197 break;
Tomas Winkler69dc5d92008-03-25 16:33:41 -07002198 default:
Zhu, Yi60294de2008-10-29 14:05:45 -07002199 IWL_ERROR("Unsupported interface type %d\n", mode);
Tomas Winkler69dc5d92008-03-25 16:33:41 -07002200 break;
Zhu Yib481de92007-09-25 17:54:57 -07002201 }
2202
2203#if 0
2204 /* TODO: Figure out when short_preamble would be set and cache from
2205 * that */
2206 if (!hw_to_local(priv->hw)->short_preamble)
2207 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2208 else
2209 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2210#endif
2211
Johannes Berg8318d782008-01-24 19:38:38 +01002212 ch_info = iwl3945_get_channel_info(priv, priv->band,
Rick Farrington25b3f572008-06-30 17:23:28 +08002213 le16_to_cpu(priv->active_rxon.channel));
Zhu Yib481de92007-09-25 17:54:57 -07002214
2215 if (!ch_info)
2216 ch_info = &priv->channel_info[0];
2217
2218 /*
 2219	 * in some cases the A-band channels are all non-IBSS;
 2220	 * in that case force a B/G channel
2221 */
Zhu, Yi60294de2008-10-29 14:05:45 -07002222 if ((mode == NL80211_IFTYPE_ADHOC) && !(is_channel_ibss(ch_info)))
Zhu Yib481de92007-09-25 17:54:57 -07002223 ch_info = &priv->channel_info[0];
2224
2225 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2226 if (is_channel_a_band(ch_info))
Johannes Berg8318d782008-01-24 19:38:38 +01002227 priv->band = IEEE80211_BAND_5GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07002228 else
Johannes Berg8318d782008-01-24 19:38:38 +01002229 priv->band = IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07002230
Johannes Berg8318d782008-01-24 19:38:38 +01002231 iwl3945_set_flags_for_phymode(priv, priv->band);
Zhu Yib481de92007-09-25 17:54:57 -07002232
2233 priv->staging_rxon.ofdm_basic_rates =
2234 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2235 priv->staging_rxon.cck_basic_rates =
2236 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2237}
2238
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002239static int iwl3945_set_mode(struct iwl3945_priv *priv, int mode)
Zhu Yib481de92007-09-25 17:54:57 -07002240{
Johannes Berg05c914f2008-09-11 00:01:58 +02002241 if (mode == NL80211_IFTYPE_ADHOC) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002242 const struct iwl3945_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07002243
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002244 ch_info = iwl3945_get_channel_info(priv,
Johannes Berg8318d782008-01-24 19:38:38 +01002245 priv->band,
Zhu Yib481de92007-09-25 17:54:57 -07002246 le16_to_cpu(priv->staging_rxon.channel));
2247
2248 if (!ch_info || !is_channel_ibss(ch_info)) {
2249 IWL_ERROR("channel %d not IBSS channel\n",
2250 le16_to_cpu(priv->staging_rxon.channel));
2251 return -EINVAL;
2252 }
2253 }
2254
Zhu, Yi60294de2008-10-29 14:05:45 -07002255 iwl3945_connection_init_rx_config(priv, mode);
Zhu Yib481de92007-09-25 17:54:57 -07002256 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2257
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002258 iwl3945_clear_stations_table(priv);
Zhu Yib481de92007-09-25 17:54:57 -07002259
Tomas Winklera96a27f2008-10-23 23:48:56 -07002260	/* don't commit rxon if rf-kill is on */
Mohamed Abbasfde35712007-11-29 11:10:15 +08002261 if (!iwl3945_is_ready_rf(priv))
2262 return -EAGAIN;
2263
2264 cancel_delayed_work(&priv->scan_check);
2265 if (iwl3945_scan_cancel_timeout(priv, 100)) {
2266 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2267 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2268 return -EAGAIN;
2269 }
2270
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002271 iwl3945_commit_rxon(priv);
Zhu Yib481de92007-09-25 17:54:57 -07002272
2273 return 0;
2274}
2275
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002276static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
Johannes Berge039fa42008-05-15 12:55:29 +02002277 struct ieee80211_tx_info *info,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002278 struct iwl3945_cmd *cmd,
Zhu Yib481de92007-09-25 17:54:57 -07002279 struct sk_buff *skb_frag,
2280 int last_frag)
2281{
Ivo van Doorn1c014422008-04-17 19:41:02 +02002282 struct iwl3945_hw_key *keyinfo =
Johannes Berge039fa42008-05-15 12:55:29 +02002283 &priv->stations[info->control.hw_key->hw_key_idx].keyinfo;
Zhu Yib481de92007-09-25 17:54:57 -07002284
2285 switch (keyinfo->alg) {
2286 case ALG_CCMP:
2287 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2288 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
Tomas Winklera96a27f2008-10-23 23:48:56 -07002289 IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n");
Zhu Yib481de92007-09-25 17:54:57 -07002290 break;
2291
2292 case ALG_TKIP:
2293#if 0
2294 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2295
2296 if (last_frag)
2297 memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
2298 8);
2299 else
2300 memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2301#endif
2302 break;
2303
2304 case ALG_WEP:
2305 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
Johannes Berge039fa42008-05-15 12:55:29 +02002306 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
Zhu Yib481de92007-09-25 17:54:57 -07002307
2308 if (keyinfo->keylen == 13)
2309 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2310
2311 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2312
2313 IWL_DEBUG_TX("Configuring packet for WEP encryption "
Johannes Berge039fa42008-05-15 12:55:29 +02002314 "with key %d\n", info->control.hw_key->hw_key_idx);
Zhu Yib481de92007-09-25 17:54:57 -07002315 break;
2316
Zhu Yib481de92007-09-25 17:54:57 -07002317 default:
2318 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
2319 break;
2320 }
2321}
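/*
 * Added note: for WEP the code above copies the 40- or 104-bit key to
 * offset 3 of the Tx command key field, flags 13-byte (104-bit) keys with
 * TX_CMD_SEC_KEY128 and folds the key index into sec_ctl via
 * TX_CMD_SEC_SHIFT; the TKIP branch is compiled out with #if 0, so TKIP
 * is not offloaded here.
 */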
2322
2323/*
 2324 * Build the generic REPLY_TX command fields (flags, timing, station id).
2325 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002326static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv,
2327 struct iwl3945_cmd *cmd,
Johannes Berge039fa42008-05-15 12:55:29 +02002328 struct ieee80211_tx_info *info,
Zhu Yib481de92007-09-25 17:54:57 -07002329 struct ieee80211_hdr *hdr,
2330 int is_unicast, u8 std_id)
2331{
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002332 __le16 fc = hdr->frame_control;
Zhu Yib481de92007-09-25 17:54:57 -07002333 __le32 tx_flags = cmd->cmd.tx.tx_flags;
Johannes Berge6a98542008-10-21 12:40:02 +02002334 u8 rc_flags = info->control.rates[0].flags;
Zhu Yib481de92007-09-25 17:54:57 -07002335
2336 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
Johannes Berge039fa42008-05-15 12:55:29 +02002337 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
Zhu Yib481de92007-09-25 17:54:57 -07002338 tx_flags |= TX_CMD_FLG_ACK_MSK;
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002339 if (ieee80211_is_mgmt(fc))
Zhu Yib481de92007-09-25 17:54:57 -07002340 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002341 if (ieee80211_is_probe_resp(fc) &&
Zhu Yib481de92007-09-25 17:54:57 -07002342 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2343 tx_flags |= TX_CMD_FLG_TSF_MSK;
2344 } else {
2345 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2346 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2347 }
2348
2349 cmd->cmd.tx.sta_id = std_id;
Harvey Harrison8b7b1e02008-06-11 14:21:56 -07002350 if (ieee80211_has_morefrags(fc))
Zhu Yib481de92007-09-25 17:54:57 -07002351 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2352
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002353 if (ieee80211_is_data_qos(fc)) {
2354 u8 *qc = ieee80211_get_qos_ctl(hdr);
Tomas Winkler54dbb522008-05-15 13:54:06 +08002355 cmd->cmd.tx.tid_tspec = qc[0] & 0xf;
Zhu Yib481de92007-09-25 17:54:57 -07002356 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
Tomas Winkler54dbb522008-05-15 13:54:06 +08002357 } else {
Zhu Yib481de92007-09-25 17:54:57 -07002358 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
Tomas Winkler54dbb522008-05-15 13:54:06 +08002359 }
Zhu Yib481de92007-09-25 17:54:57 -07002360
Johannes Berge6a98542008-10-21 12:40:02 +02002361 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
Zhu Yib481de92007-09-25 17:54:57 -07002362 tx_flags |= TX_CMD_FLG_RTS_MSK;
2363 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
Johannes Berge6a98542008-10-21 12:40:02 +02002364 } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
Zhu Yib481de92007-09-25 17:54:57 -07002365 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2366 tx_flags |= TX_CMD_FLG_CTS_MSK;
2367 }
2368
2369 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2370 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2371
2372 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002373 if (ieee80211_is_mgmt(fc)) {
2374 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
Ian Schrambc434dd2007-10-25 17:15:29 +08002375 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
Zhu Yib481de92007-09-25 17:54:57 -07002376 else
Ian Schrambc434dd2007-10-25 17:15:29 +08002377 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
Mohamed Abbasab53d8a2008-03-25 16:33:36 -07002378 } else {
Zhu Yib481de92007-09-25 17:54:57 -07002379 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
Mohamed Abbasab53d8a2008-03-25 16:33:36 -07002380#ifdef CONFIG_IWL3945_LEDS
2381 priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len);
2382#endif
2383 }
Zhu Yib481de92007-09-25 17:54:57 -07002384
2385 cmd->cmd.tx.driver_txop = 0;
2386 cmd->cmd.tx.tx_flags = tx_flags;
2387 cmd->cmd.tx.next_frame_len = 0;
2388}
2389
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002390/**
2391 * iwl3945_get_sta_id - Find station's index within station table
2392 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002393static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *hdr)
Zhu Yib481de92007-09-25 17:54:57 -07002394{
2395 int sta_id;
2396 u16 fc = le16_to_cpu(hdr->frame_control);
2397
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002398 /* If this frame is broadcast or management, use broadcast station id */
Zhu Yib481de92007-09-25 17:54:57 -07002399 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2400 is_multicast_ether_addr(hdr->addr1))
2401 return priv->hw_setting.bcast_sta_id;
2402
2403 switch (priv->iw_mode) {
2404
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002405 /* If we are a client station in a BSS network, use the special
2406 * AP station entry (that's the only station we communicate with) */
Johannes Berg05c914f2008-09-11 00:01:58 +02002407 case NL80211_IFTYPE_STATION:
Zhu Yib481de92007-09-25 17:54:57 -07002408 return IWL_AP_ID;
2409
2410 /* If we are an AP, then find the station, or use BCAST */
Johannes Berg05c914f2008-09-11 00:01:58 +02002411 case NL80211_IFTYPE_AP:
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002412 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
Zhu Yib481de92007-09-25 17:54:57 -07002413 if (sta_id != IWL_INVALID_STATION)
2414 return sta_id;
2415 return priv->hw_setting.bcast_sta_id;
2416
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002417 /* If this frame is going out to an IBSS network, find the station,
2418 * or create a new station table entry */
Johannes Berg05c914f2008-09-11 00:01:58 +02002419 case NL80211_IFTYPE_ADHOC: {
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002420 /* Create new station table entry */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002421 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
Zhu Yib481de92007-09-25 17:54:57 -07002422 if (sta_id != IWL_INVALID_STATION)
2423 return sta_id;
2424
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002425 sta_id = iwl3945_add_station(priv, hdr->addr1, 0, CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -07002426
2427 if (sta_id != IWL_INVALID_STATION)
2428 return sta_id;
2429
Johannes Berge1749612008-10-27 15:59:26 -07002430 IWL_DEBUG_DROP("Station %pM not in station map. "
Zhu Yib481de92007-09-25 17:54:57 -07002431 "Defaulting to broadcast...\n",
Johannes Berge1749612008-10-27 15:59:26 -07002432 hdr->addr1);
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08002433 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
Zhu Yib481de92007-09-25 17:54:57 -07002434 return priv->hw_setting.bcast_sta_id;
Joe Perches0795af52007-10-03 17:59:30 -07002435 }
Stefanik Gábor914233d2008-06-30 17:23:30 +08002436 /* If we are in monitor mode, use BCAST. This is required for
2437 * packet injection. */
Johannes Berg05c914f2008-09-11 00:01:58 +02002438 case NL80211_IFTYPE_MONITOR:
Stefanik Gábor914233d2008-06-30 17:23:30 +08002439 return priv->hw_setting.bcast_sta_id;
2440
Zhu Yib481de92007-09-25 17:54:57 -07002441 default:
Jiri Slaby6f147922008-08-11 23:49:41 +02002442 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
Zhu Yib481de92007-09-25 17:54:57 -07002443 return priv->hw_setting.bcast_sta_id;
2444 }
2445}
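/*
 * Added summary of the mapping above: STATION mode always talks to
 * IWL_AP_ID, AP mode looks the client up and falls back to the broadcast
 * entry, ADHOC mode looks the peer up and creates an entry on the fly if
 * needed, and MONITOR mode (packet injection) always uses the broadcast
 * entry.
 */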
2446
2447/*
2448 * start REPLY_TX command process
2449 */
Johannes Berge039fa42008-05-15 12:55:29 +02002450static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
Zhu Yib481de92007-09-25 17:54:57 -07002451{
2452 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
Johannes Berge039fa42008-05-15 12:55:29 +02002453 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002454 struct iwl3945_tfd_frame *tfd;
Zhu Yib481de92007-09-25 17:54:57 -07002455 u32 *control_flags;
Johannes Berge2530082008-05-17 00:57:14 +02002456 int txq_id = skb_get_queue_mapping(skb);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002457 struct iwl3945_tx_queue *txq = NULL;
2458 struct iwl3945_queue *q = NULL;
Zhu Yib481de92007-09-25 17:54:57 -07002459 dma_addr_t phys_addr;
2460 dma_addr_t txcmd_phys;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002461 struct iwl3945_cmd *out_cmd = NULL;
Tomas Winkler54dbb522008-05-15 13:54:06 +08002462 u16 len, idx, len_org, hdr_len;
2463 u8 id;
2464 u8 unicast;
Zhu Yib481de92007-09-25 17:54:57 -07002465 u8 sta_id;
Tomas Winkler54dbb522008-05-15 13:54:06 +08002466 u8 tid = 0;
Zhu Yib481de92007-09-25 17:54:57 -07002467 u16 seq_number = 0;
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002468 __le16 fc;
Zhu Yib481de92007-09-25 17:54:57 -07002469 u8 wait_write_ptr = 0;
Tomas Winkler54dbb522008-05-15 13:54:06 +08002470 u8 *qc = NULL;
Zhu Yib481de92007-09-25 17:54:57 -07002471 unsigned long flags;
2472 int rc;
2473
2474 spin_lock_irqsave(&priv->lock, flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002475 if (iwl3945_is_rfkill(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07002476 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2477 goto drop_unlock;
2478 }
2479
Johannes Berge039fa42008-05-15 12:55:29 +02002480 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
Zhu Yib481de92007-09-25 17:54:57 -07002481 IWL_ERROR("ERROR: No TX rate available.\n");
2482 goto drop_unlock;
2483 }
2484
2485 unicast = !is_multicast_ether_addr(hdr->addr1);
2486 id = 0;
2487
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002488 fc = hdr->frame_control;
Zhu Yib481de92007-09-25 17:54:57 -07002489
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08002490#ifdef CONFIG_IWL3945_DEBUG
Zhu Yib481de92007-09-25 17:54:57 -07002491 if (ieee80211_is_auth(fc))
2492 IWL_DEBUG_TX("Sending AUTH frame\n");
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002493 else if (ieee80211_is_assoc_req(fc))
Zhu Yib481de92007-09-25 17:54:57 -07002494 IWL_DEBUG_TX("Sending ASSOC frame\n");
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002495 else if (ieee80211_is_reassoc_req(fc))
Zhu Yib481de92007-09-25 17:54:57 -07002496 IWL_DEBUG_TX("Sending REASSOC frame\n");
2497#endif
2498
Mohamed Abbas7878a5a2007-11-29 11:10:13 +08002499 /* drop all data frame if we are not associated */
Stefanik Gábor914233d2008-06-30 17:23:30 +08002500 if (ieee80211_is_data(fc) &&
Johannes Berg05c914f2008-09-11 00:01:58 +02002501 (priv->iw_mode != NL80211_IFTYPE_MONITOR) && /* packet injection */
Stefanik Gábor914233d2008-06-30 17:23:30 +08002502 (!iwl3945_is_associated(priv) ||
Johannes Berg05c914f2008-09-11 00:01:58 +02002503 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002504 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n");
Zhu Yib481de92007-09-25 17:54:57 -07002505 goto drop_unlock;
2506 }
2507
2508 spin_unlock_irqrestore(&priv->lock, flags);
2509
Harvey Harrison7294ec92008-07-15 18:43:59 -07002510 hdr_len = ieee80211_hdrlen(fc);
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002511
2512 /* Find (or create) index into station table for destination station */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002513 sta_id = iwl3945_get_sta_id(priv, hdr);
Zhu Yib481de92007-09-25 17:54:57 -07002514 if (sta_id == IWL_INVALID_STATION) {
Johannes Berge1749612008-10-27 15:59:26 -07002515 IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n",
2516 hdr->addr1);
Zhu Yib481de92007-09-25 17:54:57 -07002517 goto drop;
2518 }
2519
2520 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2521
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002522 if (ieee80211_is_data_qos(fc)) {
2523 qc = ieee80211_get_qos_ctl(hdr);
Harvey Harrison7294ec92008-07-15 18:43:59 -07002524 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
Zhu Yib481de92007-09-25 17:54:57 -07002525 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2526 IEEE80211_SCTL_SEQ;
2527 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2528 (hdr->seq_ctrl &
2529 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2530 seq_number += 0x10;
2531 }
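	/*
	 * Added note: IEEE80211_SCTL_SEQ masks the 12-bit sequence number
	 * held in bits 4..15 of seq_ctrl, so the sequence counter advances
	 * by 0x10 per transmitted frame while the low fragment bits of the
	 * original header are preserved.
	 */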
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002532
2533 /* Descriptor for chosen Tx queue */
Zhu Yib481de92007-09-25 17:54:57 -07002534 txq = &priv->txq[txq_id];
2535 q = &txq->q;
2536
2537 spin_lock_irqsave(&priv->lock, flags);
2538
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002539 /* Set up first empty TFD within this queue's circular TFD buffer */
Tomas Winklerfc4b6852007-10-25 17:15:24 +08002540 tfd = &txq->bd[q->write_ptr];
Zhu Yib481de92007-09-25 17:54:57 -07002541 memset(tfd, 0, sizeof(*tfd));
2542 control_flags = (u32 *) tfd;
Tomas Winklerfc4b6852007-10-25 17:15:24 +08002543 idx = get_cmd_index(q, q->write_ptr, 0);
Zhu Yib481de92007-09-25 17:54:57 -07002544
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002545 /* Set up driver data for this TFD */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002546 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl3945_tx_info));
Tomas Winklerfc4b6852007-10-25 17:15:24 +08002547 txq->txb[q->write_ptr].skb[0] = skb;
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002548
2549 /* Init first empty entry in queue's array of Tx/cmd buffers */
Zhu Yib481de92007-09-25 17:54:57 -07002550 out_cmd = &txq->cmd[idx];
2551 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2552 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002553
2554 /*
2555 * Set up the Tx-command (not MAC!) header.
2556 * Store the chosen Tx queue and TFD index within the sequence field;
2557 * after Tx, uCode's Tx response will return this value so driver can
2558 * locate the frame within the tx queue and do post-tx processing.
2559 */
Zhu Yib481de92007-09-25 17:54:57 -07002560 out_cmd->hdr.cmd = REPLY_TX;
2561 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
Tomas Winklerfc4b6852007-10-25 17:15:24 +08002562 INDEX_TO_SEQ(q->write_ptr)));
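	/*
	 * For example, a frame queued on Tx queue 2 at write index 5 carries
	 * both values packed into this sequence field; iwl3945_tx_cmd_complete()
	 * later unpacks them with SEQ_TO_QUEUE()/SEQ_TO_INDEX() to locate the
	 * frame for post-Tx processing.
	 */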
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002563
2564 /* Copy MAC header from skb into command buffer */
Zhu Yib481de92007-09-25 17:54:57 -07002565 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
2566
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002567 /*
2568 * Use the first empty entry in this queue's command buffer array
2569 * to contain the Tx command and MAC header concatenated together
2570 * (payload data will be in another buffer).
2571 * Size of this varies, due to varying MAC header length.
2572 * If end is not dword aligned, we'll have 2 extra bytes at the end
2573 * of the MAC header (device reads on dword boundaries).
2574 * We'll tell device about this padding later.
2575 */
Zhu Yib481de92007-09-25 17:54:57 -07002576 len = priv->hw_setting.tx_cmd_len +
Tomas Winkler4c897252008-12-19 10:37:05 +08002577 sizeof(struct iwl_cmd_header) + hdr_len;
Zhu Yib481de92007-09-25 17:54:57 -07002578
2579 len_org = len;
2580 len = (len + 3) & ~3;
2581
2582 if (len_org != len)
2583 len_org = 1;
2584 else
2585 len_org = 0;
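	/*
	 * Worked example: if the Tx command plus MAC header came to 62 bytes,
	 * (62 + 3) & ~3 rounds the length up to 64 and len_org is set to 1 to
	 * note the 2 bytes of padding the device must be told about later.
	 */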
2586
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002587 /* Physical address of this Tx command's header (not MAC header!),
2588 * within command buffer array. */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002589 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl3945_cmd) * idx +
2590 offsetof(struct iwl3945_cmd, hdr);
Zhu Yib481de92007-09-25 17:54:57 -07002591
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002592 /* Add buffer containing Tx command and MAC(!) header to TFD's
2593 * first entry */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002594 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
Zhu Yib481de92007-09-25 17:54:57 -07002595
Johannes Bergd0f09802008-07-29 11:32:07 +02002596 if (info->control.hw_key)
Johannes Berge039fa42008-05-15 12:55:29 +02002597 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, 0);
Zhu Yib481de92007-09-25 17:54:57 -07002598
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002599 /* Set up TFD's 2nd entry to point directly to remainder of skb,
2600 * if any (802.11 null frames have no payload). */
Zhu Yib481de92007-09-25 17:54:57 -07002601 len = skb->len - hdr_len;
2602 if (len) {
2603 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2604 len, PCI_DMA_TODEVICE);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002605 iwl3945_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
Zhu Yib481de92007-09-25 17:54:57 -07002606 }
2607
Zhu Yib481de92007-09-25 17:54:57 -07002608 if (!len)
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002609 /* If there is no payload, then we use only one Tx buffer */
Zhu Yib481de92007-09-25 17:54:57 -07002610 *control_flags = TFD_CTL_COUNT_SET(1);
2611 else
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002612 /* Else use 2 buffers.
2613 * Tell 3945 about any padding after MAC header */
Zhu Yib481de92007-09-25 17:54:57 -07002614 *control_flags = TFD_CTL_COUNT_SET(2) |
2615 TFD_CTL_PAD_SET(U32_PAD(len));
2616
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002617 /* Total # bytes to be transmitted */
Zhu Yib481de92007-09-25 17:54:57 -07002618 len = (u16)skb->len;
2619 out_cmd->cmd.tx.len = cpu_to_le16(len);
2620
2621 /* TODO need this for burst mode later on */
Johannes Berge039fa42008-05-15 12:55:29 +02002622 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, unicast, sta_id);
Zhu Yib481de92007-09-25 17:54:57 -07002623
2624 /* set is_hcca to 0; it probably will never be implemented */
Johannes Berge039fa42008-05-15 12:55:29 +02002625 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
Zhu Yib481de92007-09-25 17:54:57 -07002626
2627 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
2628 out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
2629
Harvey Harrison8b7b1e02008-06-11 14:21:56 -07002630 if (!ieee80211_has_morefrags(hdr->frame_control)) {
Zhu Yib481de92007-09-25 17:54:57 -07002631 txq->need_update = 1;
Tomas Winkler3ac7f142008-07-21 02:40:14 +03002632 if (qc)
Zhu Yib481de92007-09-25 17:54:57 -07002633 priv->stations[sta_id].tid[tid].seq_number = seq_number;
Zhu Yib481de92007-09-25 17:54:57 -07002634 } else {
2635 wait_write_ptr = 1;
2636 txq->need_update = 0;
2637 }
2638
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08002639 iwl_print_hex_dump(priv, IWL_DL_TX, out_cmd->cmd.payload,
Zhu Yib481de92007-09-25 17:54:57 -07002640 sizeof(out_cmd->cmd.tx));
2641
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08002642 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
Harvey Harrison7294ec92008-07-15 18:43:59 -07002643 ieee80211_hdrlen(fc));
Zhu Yib481de92007-09-25 17:54:57 -07002644
Cahill, Ben M6440adb2007-11-29 11:09:55 +08002645 /* Tell device the write index *just past* this latest filled TFD */
Tomas Winklerc54b6792008-03-06 17:36:53 -08002646 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002647 rc = iwl3945_tx_queue_update_write_ptr(priv, txq);
Zhu Yib481de92007-09-25 17:54:57 -07002648 spin_unlock_irqrestore(&priv->lock, flags);
2649
2650 if (rc)
2651 return rc;
2652
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002653 if ((iwl3945_queue_space(q) < q->high_mark)
Zhu Yib481de92007-09-25 17:54:57 -07002654 && priv->mac80211_registered) {
2655 if (wait_write_ptr) {
2656 spin_lock_irqsave(&priv->lock, flags);
2657 txq->need_update = 1;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002658 iwl3945_tx_queue_update_write_ptr(priv, txq);
Zhu Yib481de92007-09-25 17:54:57 -07002659 spin_unlock_irqrestore(&priv->lock, flags);
2660 }
2661
Johannes Berge2530082008-05-17 00:57:14 +02002662 ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
Zhu Yib481de92007-09-25 17:54:57 -07002663 }
2664
2665 return 0;
2666
2667drop_unlock:
2668 spin_unlock_irqrestore(&priv->lock, flags);
2669drop:
2670 return -1;
2671}
2672
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002673static void iwl3945_set_rate(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002674{
Johannes Berg8318d782008-01-24 19:38:38 +01002675 const struct ieee80211_supported_band *sband = NULL;
Zhu Yib481de92007-09-25 17:54:57 -07002676 struct ieee80211_rate *rate;
2677 int i;
2678
Johannes Berg8318d782008-01-24 19:38:38 +01002679 sband = iwl3945_get_band(priv, priv->band);
2680 if (!sband) {
Saleem Abdulrasoolc4ba9622007-11-18 23:59:08 -08002681 IWL_ERROR("Failed to set rate: unable to get band\n");
2682 return;
2683 }
Zhu Yib481de92007-09-25 17:54:57 -07002684
2685 priv->active_rate = 0;
2686 priv->active_rate_basic = 0;
2687
Johannes Berg8318d782008-01-24 19:38:38 +01002688 IWL_DEBUG_RATE("Setting rates for %s GHz\n",
2689 sband->band == IEEE80211_BAND_2GHZ ? "2.4" : "5");
Zhu Yib481de92007-09-25 17:54:57 -07002690
Johannes Berg8318d782008-01-24 19:38:38 +01002691 for (i = 0; i < sband->n_bitrates; i++) {
2692 rate = &sband->bitrates[i];
2693 if ((rate->hw_value < IWL_RATE_COUNT) &&
2694 !(rate->flags & IEEE80211_CHAN_DISABLED)) {
2695 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)\n",
2696 rate->hw_value, iwl3945_rates[rate->hw_value].plcp);
2697 priv->active_rate |= (1 << rate->hw_value);
2698 }
Zhu Yib481de92007-09-25 17:54:57 -07002699 }
2700
2701 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
2702 priv->active_rate, priv->active_rate_basic);
2703
2704 /*
2705 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
2706 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
2707 * OFDM
2708 */
2709 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
2710 priv->staging_rxon.cck_basic_rates =
2711 ((priv->active_rate_basic &
2712 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
2713 else
2714 priv->staging_rxon.cck_basic_rates =
2715 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2716
2717 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
2718 priv->staging_rxon.ofdm_basic_rates =
2719 ((priv->active_rate_basic &
2720 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
2721 IWL_FIRST_OFDM_RATE) & 0xFF;
2722 else
2723 priv->staging_rxon.ofdm_basic_rates =
2724 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2725}
2726
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002727static void iwl3945_radio_kill_sw(struct iwl3945_priv *priv, int disable_radio)
Zhu Yib481de92007-09-25 17:54:57 -07002728{
2729 unsigned long flags;
2730
2731 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
2732 return;
2733
2734 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
2735 disable_radio ? "OFF" : "ON");
2736
2737 if (disable_radio) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002738 iwl3945_scan_cancel(priv);
Zhu Yib481de92007-09-25 17:54:57 -07002739 /* FIXME: This is a workaround for AP */
Johannes Berg05c914f2008-09-11 00:01:58 +02002740 if (priv->iw_mode != NL80211_IFTYPE_AP) {
Zhu Yib481de92007-09-25 17:54:57 -07002741 spin_lock_irqsave(&priv->lock, flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002742 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_SET,
Zhu Yib481de92007-09-25 17:54:57 -07002743 CSR_UCODE_SW_BIT_RFKILL);
2744 spin_unlock_irqrestore(&priv->lock, flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002745 iwl3945_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
Zhu Yib481de92007-09-25 17:54:57 -07002746 set_bit(STATUS_RF_KILL_SW, &priv->status);
2747 }
2748 return;
2749 }
2750
2751 spin_lock_irqsave(&priv->lock, flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002752 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
Zhu Yib481de92007-09-25 17:54:57 -07002753
2754 clear_bit(STATUS_RF_KILL_SW, &priv->status);
2755 spin_unlock_irqrestore(&priv->lock, flags);
2756
2757 /* wake up ucode */
2758 msleep(10);
2759
2760 spin_lock_irqsave(&priv->lock, flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002761 iwl3945_read32(priv, CSR_UCODE_DRV_GP1);
2762 if (!iwl3945_grab_nic_access(priv))
2763 iwl3945_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07002764 spin_unlock_irqrestore(&priv->lock, flags);
2765
2766 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
2767 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
2768 "disabled by HW switch\n");
2769 return;
2770 }
2771
Zhu Yi808e72a2008-06-12 09:47:17 +08002772 if (priv->is_open)
2773 queue_work(priv->workqueue, &priv->restart);
Zhu Yib481de92007-09-25 17:54:57 -07002774 return;
2775}
2776
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002777void iwl3945_set_decrypted_flag(struct iwl3945_priv *priv, struct sk_buff *skb,
Zhu Yib481de92007-09-25 17:54:57 -07002778 u32 decrypt_res, struct ieee80211_rx_status *stats)
2779{
2780 u16 fc =
2781 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
2782
2783 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2784 return;
2785
2786 if (!(fc & IEEE80211_FCTL_PROTECTED))
2787 return;
2788
2789 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2790 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2791 case RX_RES_STATUS_SEC_TYPE_TKIP:
2792 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2793 RX_RES_STATUS_BAD_ICV_MIC)
2794 stats->flag |= RX_FLAG_MMIC_ERROR;
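		/* fall through - TKIP shares the decrypt-OK check below with WEP and CCMP */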
2795 case RX_RES_STATUS_SEC_TYPE_WEP:
2796 case RX_RES_STATUS_SEC_TYPE_CCMP:
2797 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2798 RX_RES_STATUS_DECRYPT_OK) {
2799 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
2800 stats->flag |= RX_FLAG_DECRYPTED;
2801 }
2802 break;
2803
2804 default:
2805 break;
2806 }
2807}
2808
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08002809#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
Zhu Yib481de92007-09-25 17:54:57 -07002810
2811#include "iwl-spectrum.h"
2812
2813#define BEACON_TIME_MASK_LOW 0x00FFFFFF
2814#define BEACON_TIME_MASK_HIGH 0xFF000000
2815#define TIME_UNIT 1024
2816
2817/*
2818 * Extended beacon time format:
2819 * the time in usec is packed into a 32-bit value in 8:24 format;
2820 * the high byte is the beacon count,
2821 * the low 3 bytes are the time in usec within one beacon interval.
2822 */
2823
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002824static u32 iwl3945_usecs_to_beacons(u32 usec, u32 beacon_interval)
Zhu Yib481de92007-09-25 17:54:57 -07002825{
2826 u32 quot;
2827 u32 rem;
2828 u32 interval = beacon_interval * 1024;
2829
2830 if (!interval || !usec)
2831 return 0;
2832
2833 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
2834 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
2835
2836 return (quot << 24) + rem;
2837}
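/*
 * Worked example: iwl3945_usecs_to_beacons(256000, 100), with a 100-TU
 * (102400 usec) beacon interval, returns 0x0200C800: two whole beacon
 * periods in the high byte and a 51200-usec remainder in the low 24 bits.
 */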
2838
2839/* base is usually the value we get from the uCode with each received frame,
2840 * the same as the HW timer counter counting down
2841 */
2842
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002843static __le32 iwl3945_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
Zhu Yib481de92007-09-25 17:54:57 -07002844{
2845 u32 base_low = base & BEACON_TIME_MASK_LOW;
2846 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
2847 u32 interval = beacon_interval * TIME_UNIT;
2848 u32 res = (base & BEACON_TIME_MASK_HIGH) +
2849 (addon & BEACON_TIME_MASK_HIGH);
2850
2851 if (base_low > addon_low)
2852 res += base_low - addon_low;
2853 else if (base_low < addon_low) {
2854 res += interval + base_low - addon_low;
2855 res += (1 << 24);
2856 } else
2857 res += (1 << 24);
2858
2859 return cpu_to_le32(res);
2860}
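/*
 * Worked example: with a 100-TU interval, a base of 0x0200C800 (2 beacons +
 * 51200 usec) and an addon of 0x0100A000 (1 beacon + 40960 usec), the high
 * bytes sum to 3 beacons and, since base_low exceeds addon_low, the low
 * 24 bits become their 10240-usec difference, giving 0x03002800.
 */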
2861
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002862static int iwl3945_get_measurement(struct iwl3945_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -07002863 struct ieee80211_measurement_params *params,
2864 u8 type)
2865{
Tomas Winkler600c0e12008-12-19 10:37:04 +08002866 struct iwl_spectrum_cmd spectrum;
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08002867 struct iwl_rx_packet *res;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002868 struct iwl3945_host_cmd cmd = {
Zhu Yib481de92007-09-25 17:54:57 -07002869 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
2870 .data = (void *)&spectrum,
2871 .meta.flags = CMD_WANT_SKB,
2872 };
2873 u32 add_time = le64_to_cpu(params->start_time);
2874 int rc;
2875 int spectrum_resp_status;
2876 int duration = le16_to_cpu(params->duration);
2877
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002878 if (iwl3945_is_associated(priv))
Zhu Yib481de92007-09-25 17:54:57 -07002879 add_time =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002880 iwl3945_usecs_to_beacons(
Zhu Yib481de92007-09-25 17:54:57 -07002881 le64_to_cpu(params->start_time) - priv->last_tsf,
2882 le16_to_cpu(priv->rxon_timing.beacon_interval));
2883
2884 memset(&spectrum, 0, sizeof(spectrum));
2885
2886 spectrum.channel_count = cpu_to_le16(1);
2887 spectrum.flags =
2888 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
2889 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
2890 cmd.len = sizeof(spectrum);
2891 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
2892
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002893 if (iwl3945_is_associated(priv))
Zhu Yib481de92007-09-25 17:54:57 -07002894 spectrum.start_time =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002895 iwl3945_add_beacon_time(priv->last_beacon_time,
Zhu Yib481de92007-09-25 17:54:57 -07002896 add_time,
2897 le16_to_cpu(priv->rxon_timing.beacon_interval));
2898 else
2899 spectrum.start_time = 0;
2900
2901 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
2902 spectrum.channels[0].channel = params->channel;
2903 spectrum.channels[0].type = type;
2904 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
2905 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
2906 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
2907
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002908 rc = iwl3945_send_cmd_sync(priv, &cmd);
Zhu Yib481de92007-09-25 17:54:57 -07002909 if (rc)
2910 return rc;
2911
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08002912 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07002913 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
2914 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
2915 rc = -EIO;
2916 }
2917
2918 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
2919 switch (spectrum_resp_status) {
2920 case 0: /* Command will be handled */
2921 if (res->u.spectrum.id != 0xff) {
Ian Schrambc434dd2007-10-25 17:15:29 +08002922 IWL_DEBUG_INFO("Replaced existing measurement: %d\n",
2923 res->u.spectrum.id);
Zhu Yib481de92007-09-25 17:54:57 -07002924 priv->measurement_status &= ~MEASUREMENT_READY;
2925 }
2926 priv->measurement_status |= MEASUREMENT_ACTIVE;
2927 rc = 0;
2928 break;
2929
2930 case 1: /* Command will not be handled */
2931 rc = -EAGAIN;
2932 break;
2933 }
2934
2935 dev_kfree_skb_any(cmd.meta.u.skb);
2936
2937 return rc;
2938}
2939#endif
2940
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002941static void iwl3945_rx_reply_alive(struct iwl3945_priv *priv,
2942 struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07002943{
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08002944 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
2945 struct iwl_alive_resp *palive;
Zhu Yib481de92007-09-25 17:54:57 -07002946 struct delayed_work *pwork;
2947
2948 palive = &pkt->u.alive_frame;
2949
2950 IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
2951 "0x%01X 0x%01X\n",
2952 palive->is_valid, palive->ver_type,
2953 palive->ver_subtype);
2954
2955 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
2956 IWL_DEBUG_INFO("Initialization Alive received.\n");
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08002957 memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
2958 sizeof(struct iwl_alive_resp));
Zhu Yib481de92007-09-25 17:54:57 -07002959 pwork = &priv->init_alive_start;
2960 } else {
2961 IWL_DEBUG_INFO("Runtime Alive received.\n");
2962 memcpy(&priv->card_alive, &pkt->u.alive_frame,
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08002963 sizeof(struct iwl_alive_resp));
Zhu Yib481de92007-09-25 17:54:57 -07002964 pwork = &priv->alive_start;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002965 iwl3945_disable_events(priv);
Zhu Yib481de92007-09-25 17:54:57 -07002966 }
2967
2968 /* We delay the ALIVE response by 5ms to
2969 * give the HW RF Kill time to activate... */
2970 if (palive->is_valid == UCODE_VALID_OK)
2971 queue_delayed_work(priv->workqueue, pwork,
2972 msecs_to_jiffies(5));
2973 else
2974 IWL_WARNING("uCode did not respond OK.\n");
2975}
2976
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002977static void iwl3945_rx_reply_add_sta(struct iwl3945_priv *priv,
2978 struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07002979{
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08002980 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07002981
2982 IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
2983 return;
2984}
2985
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002986static void iwl3945_rx_reply_error(struct iwl3945_priv *priv,
2987 struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07002988{
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08002989 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07002990
2991 IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
2992 "seq 0x%04X ser 0x%08X\n",
2993 le32_to_cpu(pkt->u.err_resp.error_type),
2994 get_cmd_string(pkt->u.err_resp.cmd_id),
2995 pkt->u.err_resp.cmd_id,
2996 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
2997 le32_to_cpu(pkt->u.err_resp.error_info));
2998}
2999
3000#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3001
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003002static void iwl3945_rx_csa(struct iwl3945_priv *priv, struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003003{
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08003004 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003005 struct iwl3945_rxon_cmd *rxon = (void *)&priv->active_rxon;
Tomas Winkler600c0e12008-12-19 10:37:04 +08003006 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
Zhu Yib481de92007-09-25 17:54:57 -07003007 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3008 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3009 rxon->channel = csa->channel;
3010 priv->staging_rxon.channel = csa->channel;
3011}
3012
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003013static void iwl3945_rx_spectrum_measure_notif(struct iwl3945_priv *priv,
3014 struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003015{
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003016#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08003017 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
Tomas Winkler600c0e12008-12-19 10:37:04 +08003018 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
Zhu Yib481de92007-09-25 17:54:57 -07003019
3020 if (!report->state) {
3021 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
3022 "Spectrum Measure Notification: Start\n");
3023 return;
3024 }
3025
3026 memcpy(&priv->measure_report, report, sizeof(*report));
3027 priv->measurement_status |= MEASUREMENT_READY;
3028#endif
3029}
3030
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003031static void iwl3945_rx_pm_sleep_notif(struct iwl3945_priv *priv,
3032 struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003033{
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003034#ifdef CONFIG_IWL3945_DEBUG
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08003035 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
Tomas Winkler600c0e12008-12-19 10:37:04 +08003036 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
Zhu Yib481de92007-09-25 17:54:57 -07003037 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3038 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
3039#endif
3040}
3041
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003042static void iwl3945_rx_pm_debug_statistics_notif(struct iwl3945_priv *priv,
3043 struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003044{
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08003045 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07003046 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3047 "notification for %s:\n",
3048 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08003049 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw,
3050 le32_to_cpu(pkt->len));
Zhu Yib481de92007-09-25 17:54:57 -07003051}
3052
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003053static void iwl3945_bg_beacon_update(struct work_struct *work)
Zhu Yib481de92007-09-25 17:54:57 -07003054{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003055 struct iwl3945_priv *priv =
3056 container_of(work, struct iwl3945_priv, beacon_update);
Zhu Yib481de92007-09-25 17:54:57 -07003057 struct sk_buff *beacon;
3058
3059 /* Pull the updated AP beacon from mac80211; this will fail if not in AP mode */
Johannes Berge039fa42008-05-15 12:55:29 +02003060 beacon = ieee80211_beacon_get(priv->hw, priv->vif);
Zhu Yib481de92007-09-25 17:54:57 -07003061
3062 if (!beacon) {
3063 IWL_ERROR("update beacon failed\n");
3064 return;
3065 }
3066
3067 mutex_lock(&priv->mutex);
3068 /* new beacon skb is allocated every time; dispose of the previous one. */
3069 if (priv->ibss_beacon)
3070 dev_kfree_skb(priv->ibss_beacon);
3071
3072 priv->ibss_beacon = beacon;
3073 mutex_unlock(&priv->mutex);
3074
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003075 iwl3945_send_beacon_cmd(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003076}
3077
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003078static void iwl3945_rx_beacon_notif(struct iwl3945_priv *priv,
3079 struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003080{
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003081#ifdef CONFIG_IWL3945_DEBUG
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08003082 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003083 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
Zhu Yib481de92007-09-25 17:54:57 -07003084 u8 rate = beacon->beacon_notify_hdr.rate;
3085
3086 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3087 "tsf %d %d rate %d\n",
3088 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3089 beacon->beacon_notify_hdr.failure_frame,
3090 le32_to_cpu(beacon->ibss_mgr_status),
3091 le32_to_cpu(beacon->high_tsf),
3092 le32_to_cpu(beacon->low_tsf), rate);
3093#endif
3094
Johannes Berg05c914f2008-09-11 00:01:58 +02003095 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
Zhu Yib481de92007-09-25 17:54:57 -07003096 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
3097 queue_work(priv->workqueue, &priv->beacon_update);
3098}
3099
3100/* Service response to REPLY_SCAN_CMD (0x80) */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003101static void iwl3945_rx_reply_scan(struct iwl3945_priv *priv,
3102 struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003103{
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003104#ifdef CONFIG_IWL3945_DEBUG
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08003105 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
Tomas Winkler4c897252008-12-19 10:37:05 +08003106 struct iwl_scanreq_notification *notif =
3107 (struct iwl_scanreq_notification *)pkt->u.raw;
Zhu Yib481de92007-09-25 17:54:57 -07003108
3109 IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
3110#endif
3111}
3112
3113/* Service SCAN_START_NOTIFICATION (0x82) */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003114static void iwl3945_rx_scan_start_notif(struct iwl3945_priv *priv,
3115 struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003116{
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08003117 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
Tomas Winkler4c897252008-12-19 10:37:05 +08003118 struct iwl_scanstart_notification *notif =
3119 (struct iwl_scanstart_notification *)pkt->u.raw;
Zhu Yib481de92007-09-25 17:54:57 -07003120 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3121 IWL_DEBUG_SCAN("Scan start: "
3122 "%d [802.11%s] "
3123 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3124 notif->channel,
3125 notif->band ? "bg" : "a",
3126 notif->tsf_high,
3127 notif->tsf_low, notif->status, notif->beacon_timer);
3128}
3129
3130/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003131static void iwl3945_rx_scan_results_notif(struct iwl3945_priv *priv,
3132 struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003133{
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08003134 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
Tomas Winkler4c897252008-12-19 10:37:05 +08003135 struct iwl_scanresults_notification *notif =
3136 (struct iwl_scanresults_notification *)pkt->u.raw;
Zhu Yib481de92007-09-25 17:54:57 -07003137
3138 IWL_DEBUG_SCAN("Scan ch.res: "
3139 "%d [802.11%s] "
3140 "(TSF: 0x%08X:%08X) - %d "
3141 "elapsed=%lu usec (%dms since last)\n",
3142 notif->channel,
3143 notif->band ? "bg" : "a",
3144 le32_to_cpu(notif->tsf_high),
3145 le32_to_cpu(notif->tsf_low),
3146 le32_to_cpu(notif->statistics[0]),
3147 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
3148 jiffies_to_msecs(elapsed_jiffies
3149 (priv->last_scan_jiffies, jiffies)));
3150
3151 priv->last_scan_jiffies = jiffies;
Mohamed Abbas7878a5a2007-11-29 11:10:13 +08003152 priv->next_scan_jiffies = 0;
Zhu Yib481de92007-09-25 17:54:57 -07003153}
3154
3155/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003156static void iwl3945_rx_scan_complete_notif(struct iwl3945_priv *priv,
3157 struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003158{
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08003159 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
Tomas Winkler4c897252008-12-19 10:37:05 +08003160 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
Zhu Yib481de92007-09-25 17:54:57 -07003161
3162 IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
3163 scan_notif->scanned_channels,
3164 scan_notif->tsf_low,
3165 scan_notif->tsf_high, scan_notif->status);
3166
3167 /* The HW is no longer scanning */
3168 clear_bit(STATUS_SCAN_HW, &priv->status);
3169
3170 /* The scan completion notification came in, so kill that timer... */
3171 cancel_delayed_work(&priv->scan_check);
3172
3173 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
Ron Rindjunsky66b50042008-06-25 16:46:31 +08003174 (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
3175 "2.4" : "5.2",
Zhu Yib481de92007-09-25 17:54:57 -07003176 jiffies_to_msecs(elapsed_jiffies
3177 (priv->scan_pass_start, jiffies)));
3178
Ron Rindjunsky66b50042008-06-25 16:46:31 +08003179 /* Remove this scanned band from the list of pending
3180 * bands to scan, band G precedes A in order of scanning
3181 * as seen in iwl3945_bg_request_scan */
3182 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
3183 priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
3184 else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
3185 priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
Zhu Yib481de92007-09-25 17:54:57 -07003186
3187 /* If a request to abort was given, or the scan did not succeed
3188 * then we reset the scan state machine and terminate,
3189 * re-queuing another scan if one has been requested */
3190 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
3191 IWL_DEBUG_INFO("Aborted scan completed.\n");
3192 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
3193 } else {
3194 /* If there are more bands on this scan pass reschedule */
3195 if (priv->scan_bands > 0)
3196 goto reschedule;
3197 }
3198
3199 priv->last_scan_jiffies = jiffies;
Mohamed Abbas7878a5a2007-11-29 11:10:13 +08003200 priv->next_scan_jiffies = 0;
Zhu Yib481de92007-09-25 17:54:57 -07003201 IWL_DEBUG_INFO("Setting scan to off\n");
3202
3203 clear_bit(STATUS_SCANNING, &priv->status);
3204
3205 IWL_DEBUG_INFO("Scan took %dms\n",
3206 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
3207
3208 queue_work(priv->workqueue, &priv->scan_completed);
3209
3210 return;
3211
3212reschedule:
3213 priv->scan_pass_start = jiffies;
3214 queue_work(priv->workqueue, &priv->request_scan);
3215}
3216
3217/* Handle notification from uCode that card's power state is changing
3218 * due to software, hardware, or critical temperature RFKILL */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003219static void iwl3945_rx_card_state_notif(struct iwl3945_priv *priv,
3220 struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003221{
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08003222 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07003223 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
3224 unsigned long status = priv->status;
3225
3226 IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
3227 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
3228 (flags & SW_CARD_DISABLED) ? "Kill" : "On");
3229
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003230 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_SET,
Zhu Yib481de92007-09-25 17:54:57 -07003231 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3232
3233 if (flags & HW_CARD_DISABLED)
3234 set_bit(STATUS_RF_KILL_HW, &priv->status);
3235 else
3236 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3237
3238
3239 if (flags & SW_CARD_DISABLED)
3240 set_bit(STATUS_RF_KILL_SW, &priv->status);
3241 else
3242 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3243
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003244 iwl3945_scan_cancel(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003245
3246 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
3247 test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
3248 (test_bit(STATUS_RF_KILL_SW, &status) !=
3249 test_bit(STATUS_RF_KILL_SW, &priv->status)))
3250 queue_work(priv->workqueue, &priv->rf_kill);
3251 else
3252 wake_up_interruptible(&priv->wait_command_queue);
3253}
3254
3255/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003256 * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
Zhu Yib481de92007-09-25 17:54:57 -07003257 *
3258 * Setup the RX handlers for each of the reply types sent from the uCode
3259 * to the host.
3260 *
3261 * This function chains into the hardware specific files for them to setup
3262 * any hardware specific handlers as well.
3263 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003264static void iwl3945_setup_rx_handlers(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003265{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003266 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
3267 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
3268 priv->rx_handlers[REPLY_ERROR] = iwl3945_rx_reply_error;
3269 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl3945_rx_csa;
Zhu Yib481de92007-09-25 17:54:57 -07003270 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003271 iwl3945_rx_spectrum_measure_notif;
3272 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl3945_rx_pm_sleep_notif;
Zhu Yib481de92007-09-25 17:54:57 -07003273 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003274 iwl3945_rx_pm_debug_statistics_notif;
3275 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
Zhu Yib481de92007-09-25 17:54:57 -07003276
Ben Cahill9fbab512007-11-29 11:09:47 +08003277 /*
3278 * The same handler is used for both the REPLY to a discrete
3279 * statistics request from the host as well as for the periodic
3280 * statistics notifications (after received beacons) from the uCode.
Zhu Yib481de92007-09-25 17:54:57 -07003281 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003282 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
3283 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
Zhu Yib481de92007-09-25 17:54:57 -07003284
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003285 priv->rx_handlers[REPLY_SCAN_CMD] = iwl3945_rx_reply_scan;
3286 priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl3945_rx_scan_start_notif;
Zhu Yib481de92007-09-25 17:54:57 -07003287 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003288 iwl3945_rx_scan_results_notif;
Zhu Yib481de92007-09-25 17:54:57 -07003289 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003290 iwl3945_rx_scan_complete_notif;
3291 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
Zhu Yib481de92007-09-25 17:54:57 -07003292
Ben Cahill9fbab512007-11-29 11:09:47 +08003293 /* Set up hardware specific Rx handlers */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003294 iwl3945_hw_rx_handler_setup(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003295}
3296
3297/**
Tomas Winkler91c066f2008-03-06 17:36:55 -08003298 * iwl3945_cmd_queue_reclaim - Reclaim CMD queue entries
3299 * When FW advances 'R' index, all entries between old and new 'R' index
3300 * need to be reclaimed.
3301 */
3302static void iwl3945_cmd_queue_reclaim(struct iwl3945_priv *priv,
3303 int txq_id, int index)
3304{
3305 struct iwl3945_tx_queue *txq = &priv->txq[txq_id];
3306 struct iwl3945_queue *q = &txq->q;
3307 int nfreed = 0;
3308
3309 if ((index >= q->n_bd) || (iwl3945_x2_queue_used(q, index) == 0)) {
3310 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3311 "is out of range [0-%d] %d %d.\n", txq_id,
3312 index, q->n_bd, q->write_ptr, q->read_ptr);
3313 return;
3314 }
3315
3316 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
3317 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3318 if (nfreed > 1) {
3319 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
3320 q->write_ptr, q->read_ptr);
3321 queue_work(priv->workqueue, &priv->restart);
3322 break;
3323 }
3324 nfreed++;
3325 }
3326}
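/*
 * In the common case the response reports the index the command was sent
 * from: e.g. with read_ptr == 5 and index == 5 the loop above advances the
 * read pointer once (5 -> 6) and reclaims exactly one entry; having to free
 * several entries for a single response means host commands were skipped,
 * which is what triggers the restart work.
 */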
3327
3328
3329/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003330 * iwl3945_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
Zhu Yib481de92007-09-25 17:54:57 -07003331 * @rxb: Rx buffer to reclaim
3332 *
3333 * If an Rx buffer has an async callback associated with it the callback
3334 * will be executed. The attached skb (if present) will only be freed
3335 * if the callback returns 1
3336 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003337static void iwl3945_tx_cmd_complete(struct iwl3945_priv *priv,
3338 struct iwl3945_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003339{
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08003340 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07003341 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3342 int txq_id = SEQ_TO_QUEUE(sequence);
3343 int index = SEQ_TO_INDEX(sequence);
Tomas Winkler600c0e12008-12-19 10:37:04 +08003344 int huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
Zhu Yib481de92007-09-25 17:54:57 -07003345 int cmd_index;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003346 struct iwl3945_cmd *cmd;
Zhu Yib481de92007-09-25 17:54:57 -07003347
Zhu Yib481de92007-09-25 17:54:57 -07003348 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
3349
3350 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
3351 cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
3352
3353 /* Input error checking is done when commands are added to queue. */
3354 if (cmd->meta.flags & CMD_WANT_SKB) {
3355 cmd->meta.source->u.skb = rxb->skb;
3356 rxb->skb = NULL;
3357 } else if (cmd->meta.u.callback &&
3358 !cmd->meta.u.callback(priv, cmd, rxb->skb))
3359 rxb->skb = NULL;
3360
Tomas Winkler91c066f2008-03-06 17:36:55 -08003361 iwl3945_cmd_queue_reclaim(priv, txq_id, index);
Zhu Yib481de92007-09-25 17:54:57 -07003362
3363 if (!(cmd->meta.flags & CMD_ASYNC)) {
3364 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3365 wake_up_interruptible(&priv->wait_command_queue);
3366 }
3367}
3368
3369/************************** RX-FUNCTIONS ****************************/
3370/*
3371 * Rx theory of operation
3372 *
3373 * The host allocates 32 DMA target addresses and passes the host address
3374 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
3375 * 0 to 31
3376 *
3377 * Rx Queue Indexes
3378 * The host/firmware share two index registers for managing the Rx buffers.
3379 *
3380 * The READ index maps to the first position that the firmware may be writing
3381 * to -- the driver can read up to (but not including) this position and get
3382 * good data.
3383 * The READ index is managed by the firmware once the card is enabled.
3384 *
3385 * The WRITE index maps to the last position the driver has read from -- the
3386 * position preceding WRITE is the last slot the firmware can place a packet.
3387 *
3388 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3389 * WRITE = READ.
3390 *
Ben Cahill9fbab512007-11-29 11:09:47 +08003391 * During initialization, the host sets up the READ queue position to the first
Zhu Yib481de92007-09-25 17:54:57 -07003392 * INDEX position, and WRITE to the last (READ - 1 wrapped)
3393 *
Ben Cahill9fbab512007-11-29 11:09:47 +08003394 * When the firmware places a packet in a buffer, it will advance the READ index
Zhu Yib481de92007-09-25 17:54:57 -07003395 * and fire the RX interrupt. The driver can then query the READ index and
3396 * process as many packets as possible, moving the WRITE index forward as it
3397 * resets the Rx queue buffers with new memory.
3398 *
3399 * The management in the driver is as follows:
3400 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
3401 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
Ian Schram01ebd062007-10-25 17:15:22 +08003402 * to replenish the iwl->rxq->rx_free.
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003403 * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
Zhu Yib481de92007-09-25 17:54:57 -07003404 * iwl->rxq is replenished and the READ INDEX is updated (updating the
3405 * 'processed' and 'read' driver indexes as well)
3406 * + A received packet is processed and handed to the kernel network stack,
3407 * detached from the iwl->rxq. The driver 'processed' index is updated.
3408 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
3409 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
3410 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
3411 * were enough free buffers and RX_STALLED is set it is cleared.
3412 *
3413 *
3414 * Driver sequence:
3415 *
Ben Cahill9fbab512007-11-29 11:09:47 +08003416 * iwl3945_rx_queue_alloc() Allocates rx_free
3417 * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003418 * iwl3945_rx_queue_restock
Ben Cahill9fbab512007-11-29 11:09:47 +08003419 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
Zhu Yib481de92007-09-25 17:54:57 -07003420 * queue, updates firmware pointers, and updates
3421 * the WRITE index. If insufficient rx_free buffers
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003422 * are available, schedules iwl3945_rx_replenish
Zhu Yib481de92007-09-25 17:54:57 -07003423 *
3424 * -- enable interrupts --
Ben Cahill9fbab512007-11-29 11:09:47 +08003425 * ISR - iwl3945_rx() Detach iwl3945_rx_mem_buffers from pool up to the
Zhu Yib481de92007-09-25 17:54:57 -07003426 * READ INDEX, detaching the SKB from the pool.
3427 * Moves the packet buffer from queue to rx_used.
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003428 * Calls iwl3945_rx_queue_restock to refill any empty
Zhu Yib481de92007-09-25 17:54:57 -07003429 * slots.
3430 * ...
3431 *
3432 */
3433
3434/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003435 * iwl3945_rx_queue_space - Return number of free slots available in queue.
Zhu Yib481de92007-09-25 17:54:57 -07003436 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003437static int iwl3945_rx_queue_space(const struct iwl3945_rx_queue *q)
Zhu Yib481de92007-09-25 17:54:57 -07003438{
3439 int s = q->read - q->write;
3440 if (s <= 0)
3441 s += RX_QUEUE_SIZE;
3442 /* keep a couple of slots unused so a full queue is never confused with an empty one */
3443 s -= 2;
3444 if (s < 0)
3445 s = 0;
3446 return s;
3447}
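/*
 * Worked example (assuming the usual RX_QUEUE_SIZE of 256): read = 10 and
 * write = 200 give s = 10 - 200 + 256 - 2 = 64 slots that can safely be
 * restocked without the queue ever looking empty when it is actually full.
 */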
3448
3449/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003450 * iwl3945_rx_queue_update_write_ptr - Update the write pointer for the RX queue
Zhu Yib481de92007-09-25 17:54:57 -07003451 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003452int iwl3945_rx_queue_update_write_ptr(struct iwl3945_priv *priv, struct iwl3945_rx_queue *q)
Zhu Yib481de92007-09-25 17:54:57 -07003453{
3454 u32 reg = 0;
3455 int rc = 0;
3456 unsigned long flags;
3457
3458 spin_lock_irqsave(&q->lock, flags);
3459
3460 if (q->need_update == 0)
3461 goto exit_unlock;
3462
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003463 /* If power-saving is in use, make sure device is awake */
Zhu Yib481de92007-09-25 17:54:57 -07003464 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003465 reg = iwl3945_read32(priv, CSR_UCODE_DRV_GP1);
Zhu Yib481de92007-09-25 17:54:57 -07003466
3467 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003468 iwl3945_set_bit(priv, CSR_GP_CNTRL,
Zhu Yib481de92007-09-25 17:54:57 -07003469 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3470 goto exit_unlock;
3471 }
3472
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003473 rc = iwl3945_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003474 if (rc)
3475 goto exit_unlock;
3476
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003477 /* Device expects a multiple of 8 */
Tomas Winklerbddadf82008-12-19 10:37:01 +08003478 iwl3945_write_direct32(priv, FH39_RSCSR_CHNL0_WPTR,
Zhu Yib481de92007-09-25 17:54:57 -07003479 q->write & ~0x7);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003480 iwl3945_release_nic_access(priv);
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003481
3482 /* Else device is assumed to be awake */
Zhu Yib481de92007-09-25 17:54:57 -07003483 } else
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003484 /* Device expects a multiple of 8 */
Tomas Winklerbddadf82008-12-19 10:37:01 +08003485 iwl3945_write32(priv, FH39_RSCSR_CHNL0_WPTR, q->write & ~0x7);
Zhu Yib481de92007-09-25 17:54:57 -07003486
3487
3488 q->need_update = 0;
3489
3490 exit_unlock:
3491 spin_unlock_irqrestore(&q->lock, flags);
3492 return rc;
3493}
3494
3495/**
Ben Cahill9fbab512007-11-29 11:09:47 +08003496 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
Zhu Yib481de92007-09-25 17:54:57 -07003497 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003498static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl3945_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -07003499 dma_addr_t dma_addr)
3500{
3501 return cpu_to_le32((u32)dma_addr);
3502}
3503
3504/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003505 * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
Zhu Yib481de92007-09-25 17:54:57 -07003506 *
Ben Cahill9fbab512007-11-29 11:09:47 +08003507 * If there are slots in the RX queue that need to be restocked,
Zhu Yib481de92007-09-25 17:54:57 -07003508 * and we have free pre-allocated buffers, fill the ranks as much
Ben Cahill9fbab512007-11-29 11:09:47 +08003509 * as we can, pulling from rx_free.
Zhu Yib481de92007-09-25 17:54:57 -07003510 *
3511 * This moves the 'write' index forward to catch up with 'processed', and
3512 * also updates the memory address in the firmware to reference the new
3513 * target buffer.
3514 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003515static int iwl3945_rx_queue_restock(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003516{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003517 struct iwl3945_rx_queue *rxq = &priv->rxq;
Zhu Yib481de92007-09-25 17:54:57 -07003518 struct list_head *element;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003519 struct iwl3945_rx_mem_buffer *rxb;
Zhu Yib481de92007-09-25 17:54:57 -07003520 unsigned long flags;
3521 int write, rc;
3522
3523 spin_lock_irqsave(&rxq->lock, flags);
3524 write = rxq->write & ~0x7;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003525 while ((iwl3945_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003526 /* Get next free Rx buffer, remove from free list */
Zhu Yib481de92007-09-25 17:54:57 -07003527 element = rxq->rx_free.next;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003528 rxb = list_entry(element, struct iwl3945_rx_mem_buffer, list);
Zhu Yib481de92007-09-25 17:54:57 -07003529 list_del(element);
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003530
3531 /* Point to Rx buffer via next RBD in circular buffer */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003532 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->dma_addr);
Zhu Yib481de92007-09-25 17:54:57 -07003533 rxq->queue[rxq->write] = rxb;
3534 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
3535 rxq->free_count--;
3536 }
3537 spin_unlock_irqrestore(&rxq->lock, flags);
3538 /* If the pre-allocated buffer pool is dropping low, schedule to
3539 * refill it */
3540 if (rxq->free_count <= RX_LOW_WATERMARK)
3541 queue_work(priv->workqueue, &priv->rx_replenish);
3542
3543
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003544 /* If we've added more space for the firmware to place data, tell it.
3545 * Increment device's write pointer in multiples of 8. */
Zhu Yib481de92007-09-25 17:54:57 -07003546 if ((write != (rxq->write & ~0x7))
3547 || (abs(rxq->write - rxq->read) > 7)) {
3548 spin_lock_irqsave(&rxq->lock, flags);
3549 rxq->need_update = 1;
3550 spin_unlock_irqrestore(&rxq->lock, flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003551 rc = iwl3945_rx_queue_update_write_ptr(priv, rxq);
Zhu Yib481de92007-09-25 17:54:57 -07003552 if (rc)
3553 return rc;
3554 }
3555
3556 return 0;
3557}
3558
3559/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003560 * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free
Zhu Yib481de92007-09-25 17:54:57 -07003561 *
3562 * When moving to rx_free an SKB is allocated for the slot.
3563 *
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003564 * Also restock the Rx queue via iwl3945_rx_queue_restock.
Ian Schram01ebd062007-10-25 17:15:22 +08003565 * This is called as a scheduled work item (except during initialization)
Zhu Yib481de92007-09-25 17:54:57 -07003566 */
Mohamed Abbas5c0eef92007-11-29 11:10:14 +08003567static void iwl3945_rx_allocate(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003568{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003569 struct iwl3945_rx_queue *rxq = &priv->rxq;
Zhu Yib481de92007-09-25 17:54:57 -07003570 struct list_head *element;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003571 struct iwl3945_rx_mem_buffer *rxb;
Zhu Yib481de92007-09-25 17:54:57 -07003572 unsigned long flags;
3573 spin_lock_irqsave(&rxq->lock, flags);
3574 while (!list_empty(&rxq->rx_used)) {
3575 element = rxq->rx_used.next;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003576 rxb = list_entry(element, struct iwl3945_rx_mem_buffer, list);
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003577
3578 /* Alloc a new receive buffer */
Zhu Yib481de92007-09-25 17:54:57 -07003579 rxb->skb =
3580 alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC);
3581 if (!rxb->skb) {
3582 if (net_ratelimit())
3583 printk(KERN_CRIT DRV_NAME
3584 ": Can not allocate SKB buffers\n");
3585 /* We don't reschedule replenish work here -- we will
3586 * call the restock method and if it still needs
3587 * more buffers it will schedule replenish */
3588 break;
3589 }
Zhu Yi12342c42007-12-20 11:27:32 +08003590
3591 /* If radiotap head is required, reserve some headroom here.
3592 * The physical head count is a variable rx_stats->phy_count.
3593 * We reserve 4 bytes here. Plus these extra bytes, the
3594 * headroom of the physical head should be enough for the
3595 * radiotap head that iwl3945 supported. See iwl3945_rt.
3596 */
3597 skb_reserve(rxb->skb, 4);
3598
Zhu Yib481de92007-09-25 17:54:57 -07003599 priv->alloc_rxb_skb++;
3600 list_del(element);
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003601
3602 /* Get physical address of RB/SKB */
Zhu Yib481de92007-09-25 17:54:57 -07003603 rxb->dma_addr =
3604 pci_map_single(priv->pci_dev, rxb->skb->data,
3605 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3606 list_add_tail(&rxb->list, &rxq->rx_free);
3607 rxq->free_count++;
3608 }
3609 spin_unlock_irqrestore(&rxq->lock, flags);
Mohamed Abbas5c0eef92007-11-29 11:10:14 +08003610}
3611
3612/*
3613 * this should be called while priv->lock is locked
3614 */
Tomas Winkler4fd1f842007-12-05 20:59:58 +02003615static void __iwl3945_rx_replenish(void *data)
Mohamed Abbas5c0eef92007-11-29 11:10:14 +08003616{
3617 struct iwl3945_priv *priv = data;
3618
3619 iwl3945_rx_allocate(priv);
3620 iwl3945_rx_queue_restock(priv);
3621}
3622
3623
3624void iwl3945_rx_replenish(void *data)
3625{
3626 struct iwl3945_priv *priv = data;
3627 unsigned long flags;
3628
3629 iwl3945_rx_allocate(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003630
3631 spin_lock_irqsave(&priv->lock, flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003632 iwl3945_rx_queue_restock(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003633 spin_unlock_irqrestore(&priv->lock, flags);
3634}
3635
3636/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
Ben Cahill9fbab512007-11-29 11:09:47 +08003637 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
Zhu Yib481de92007-09-25 17:54:57 -07003638 * This free routine walks the list of POOL entries and, if an SKB is
3639 * non-NULL, unmaps and frees it.
3640 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003641static void iwl3945_rx_queue_free(struct iwl3945_priv *priv, struct iwl3945_rx_queue *rxq)
Zhu Yib481de92007-09-25 17:54:57 -07003642{
3643 int i;
3644 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
3645 if (rxq->pool[i].skb != NULL) {
3646 pci_unmap_single(priv->pci_dev,
3647 rxq->pool[i].dma_addr,
3648 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3649 dev_kfree_skb(rxq->pool[i].skb);
3650 }
3651 }
3652
3653 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
3654 rxq->dma_addr);
3655 rxq->bd = NULL;
3656}
3657
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003658int iwl3945_rx_queue_alloc(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003659{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003660 struct iwl3945_rx_queue *rxq = &priv->rxq;
Zhu Yib481de92007-09-25 17:54:57 -07003661 struct pci_dev *dev = priv->pci_dev;
3662 int i;
3663
3664 spin_lock_init(&rxq->lock);
3665 INIT_LIST_HEAD(&rxq->rx_free);
3666 INIT_LIST_HEAD(&rxq->rx_used);
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003667
3668 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
Zhu Yib481de92007-09-25 17:54:57 -07003669 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
3670 if (!rxq->bd)
3671 return -ENOMEM;
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003672
Zhu Yib481de92007-09-25 17:54:57 -07003673 /* Fill the rx_used queue with _all_ of the Rx buffers */
3674 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
3675 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003676
Zhu Yib481de92007-09-25 17:54:57 -07003677 /* Set the indexes as if we have processed and used all buffers, but
3678 * have not yet restocked the Rx queue with fresh buffers */
3679 rxq->read = rxq->write = 0;
3680 rxq->free_count = 0;
3681 rxq->need_update = 0;
3682 return 0;
3683}
3684
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003685void iwl3945_rx_queue_reset(struct iwl3945_priv *priv, struct iwl3945_rx_queue *rxq)
Zhu Yib481de92007-09-25 17:54:57 -07003686{
3687 unsigned long flags;
3688 int i;
3689 spin_lock_irqsave(&rxq->lock, flags);
3690 INIT_LIST_HEAD(&rxq->rx_free);
3691 INIT_LIST_HEAD(&rxq->rx_used);
3692 /* Fill the rx_used queue with _all_ of the Rx buffers */
3693 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3694 /* In the reset function, these buffers may have been allocated
3695 * to an SKB, so we need to unmap and free potential storage */
3696 if (rxq->pool[i].skb != NULL) {
3697 pci_unmap_single(priv->pci_dev,
3698 rxq->pool[i].dma_addr,
3699 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3700 priv->alloc_rxb_skb--;
3701 dev_kfree_skb(rxq->pool[i].skb);
3702 rxq->pool[i].skb = NULL;
3703 }
3704 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3705 }
3706
3707 /* Set the indexes as if we have processed and used all buffers, but
3708 * have not yet restocked the Rx queue with fresh buffers */
3709 rxq->read = rxq->write = 0;
3710 rxq->free_count = 0;
3711 spin_unlock_irqrestore(&rxq->lock, flags);
3712}
3713
3714/* Convert linear signal-to-noise ratio into dB */
3715static u8 ratio2dB[100] = {
3716/* 0 1 2 3 4 5 6 7 8 9 */
3717 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
3718 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
3719 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
3720 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
3721 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
3722 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
3723 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
3724 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
3725 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
3726 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
3727};
3728
3729/* Calculates a relative dB value from a ratio of linear
3730 * (i.e. not dB) signal levels.
3731 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003732int iwl3945_calc_db_from_ratio(int sig_ratio)
Zhu Yib481de92007-09-25 17:54:57 -07003733{
Adrian Bunk221c80c2008-02-02 23:19:01 +02003734 /* 1000:1 or higher just report as 60 dB */
3735 if (sig_ratio >= 1000)
Zhu Yib481de92007-09-25 17:54:57 -07003736 return 60;
3737
Adrian Bunk221c80c2008-02-02 23:19:01 +02003738 /* 100:1 or higher, divide by 10 and use table,
Zhu Yib481de92007-09-25 17:54:57 -07003739 * add 20 dB to make up for divide by 10 */
Adrian Bunk221c80c2008-02-02 23:19:01 +02003740 if (sig_ratio >= 100)
Tomas Winkler3ac7f142008-07-21 02:40:14 +03003741 return 20 + (int)ratio2dB[sig_ratio/10];
Zhu Yib481de92007-09-25 17:54:57 -07003742
3743 /* We shouldn't see this */
3744 if (sig_ratio < 1)
3745 return 0;
3746
3747 /* Use table for ratios 1:1 - 99:1 */
3748 return (int)ratio2dB[sig_ratio];
3749}
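/*
 * Worked example: a 250:1 linear ratio takes the divide-by-10 path above,
 * giving 20 dB + ratio2dB[25] = 20 + 28 = 48 dB, close to the exact value
 * 20*log10(250) = 47.96 dB.
 */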
3750
3751#define PERFECT_RSSI (-20) /* dBm */
3752#define WORST_RSSI (-95) /* dBm */
3753#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
3754
3755/* Calculate an indication of rx signal quality (a percentage, not dBm!).
3756 * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
3757 * about formulas used below. */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003758int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
Zhu Yib481de92007-09-25 17:54:57 -07003759{
3760 int sig_qual;
3761 int degradation = PERFECT_RSSI - rssi_dbm;
3762
3763 /* If we get a noise measurement, use signal-to-noise ratio (SNR)
3764 * as indicator; formula is (signal dbm - noise dbm).
3765 * SNR at or above 40 is a great signal (100%).
3766 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
3767 * Weakest usable signal is usually 10 - 15 dB SNR. */
3768 if (noise_dbm) {
3769 if (rssi_dbm - noise_dbm >= 40)
3770 return 100;
3771 else if (rssi_dbm < noise_dbm)
3772 return 0;
3773 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
3774
3775 /* Else use just the signal level.
3776 * This formula is a least squares fit of data points collected and
3777 * compared with a reference system that had a percentage (%) display
3778 * for signal quality. */
3779 } else
3780 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
3781 (15 * RSSI_RANGE + 62 * degradation)) /
3782 (RSSI_RANGE * RSSI_RANGE);
3783
3784 if (sig_qual > 100)
3785 sig_qual = 100;
3786 else if (sig_qual < 1)
3787 sig_qual = 0;
3788
3789 return sig_qual;
3790}
3791
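/*
 * Worked examples (illustrative only, not from the original driver): with
 * rssi_dbm = -60 and noise_dbm = -90, the SNR path above gives
 * ((-60 - -90) * 5) / 2 = 75%.  With no noise measurement and
 * rssi_dbm = -60, degradation = 40 and RSSI_RANGE = 75, so
 * sig_qual = (100*75*75 - 40*(15*75 + 62*40)) / (75*75) = 74%.
 */
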
3792/**
Ben Cahill9fbab512007-11-29 11:09:47 +08003793 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
Zhu Yib481de92007-09-25 17:54:57 -07003794 *
3795 * Uses the priv->rx_handlers callback function array to invoke
3796 * the appropriate handlers, including command responses,
3797 * frame-received notifications, and other notifications.
3798 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003799static void iwl3945_rx_handle(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003800{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003801 struct iwl3945_rx_mem_buffer *rxb;
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08003802 struct iwl_rx_packet *pkt;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003803 struct iwl3945_rx_queue *rxq = &priv->rxq;
Zhu Yib481de92007-09-25 17:54:57 -07003804 u32 r, i;
3805 int reclaim;
3806 unsigned long flags;
Mohamed Abbas5c0eef92007-11-29 11:10:14 +08003807 u8 fill_rx = 0;
Mohamed Abbasd68ab682008-02-07 13:16:33 -08003808 u32 count = 8;
Zhu Yib481de92007-09-25 17:54:57 -07003809
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003810 /* uCode's read index (stored in shared DRAM) indicates the last Rx
3811 * buffer that the driver may process (last buffer filled by ucode). */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003812 r = iwl3945_hw_get_rx_read(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003813 i = rxq->read;
3814
Mohamed Abbas5c0eef92007-11-29 11:10:14 +08003815 if (iwl3945_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
3816 fill_rx = 1;
Zhu Yib481de92007-09-25 17:54:57 -07003817 /* Rx interrupt, but nothing sent from uCode */
3818 if (i == r)
3819 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
3820
3821 while (i != r) {
3822 rxb = rxq->queue[i];
3823
Ben Cahill9fbab512007-11-29 11:09:47 +08003824 /* If an RXB doesn't have a Rx queue slot associated with it,
Zhu Yib481de92007-09-25 17:54:57 -07003825 * then a bug has been introduced in the queue refilling
3826 * routines -- catch it here */
3827 BUG_ON(rxb == NULL);
3828
3829 rxq->queue[i] = NULL;
3830
3831 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
3832 IWL_RX_BUF_SIZE,
3833 PCI_DMA_FROMDEVICE);
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08003834 pkt = (struct iwl_rx_packet *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07003835
3836 /* Reclaim a command buffer only if this packet is a response
3837 * to a (driver-originated) command.
3838 * If the packet (e.g. Rx frame) originated from uCode,
3839 * there is no command buffer to reclaim.
 3840	 * uCode should set SEQ_RX_FRAME bit if uCode-originated,
3841 * but apparently a few don't get set; catch them here. */
3842 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
3843 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
3844 (pkt->hdr.cmd != REPLY_TX);
3845
3846 /* Based on type of command response or notification,
3847 * handle those that need handling via function in
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003848 * rx_handlers table. See iwl3945_setup_rx_handlers() */
Zhu Yib481de92007-09-25 17:54:57 -07003849 if (priv->rx_handlers[pkt->hdr.cmd]) {
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08003850 IWL_DEBUG(IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
Zhu Yib481de92007-09-25 17:54:57 -07003851 "r = %d, i = %d, %s, 0x%02x\n", r, i,
3852 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3853 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
3854 } else {
3855 /* No handling needed */
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08003856 IWL_DEBUG(IWL_DL_HCMD | IWL_DL_RX | IWL_DL_ISR,
Zhu Yib481de92007-09-25 17:54:57 -07003857 "r %d i %d No handler needed for %s, 0x%02x\n",
3858 r, i, get_cmd_string(pkt->hdr.cmd),
3859 pkt->hdr.cmd);
3860 }
3861
3862 if (reclaim) {
Ben Cahill9fbab512007-11-29 11:09:47 +08003863 /* Invoke any callbacks, transfer the skb to caller, and
3864 * fire off the (possibly) blocking iwl3945_send_cmd()
Zhu Yib481de92007-09-25 17:54:57 -07003865 * as we reclaim the driver command queue */
3866 if (rxb && rxb->skb)
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003867 iwl3945_tx_cmd_complete(priv, rxb);
Zhu Yib481de92007-09-25 17:54:57 -07003868 else
3869 IWL_WARNING("Claim null rxb?\n");
3870 }
3871
3872 /* For now we just don't re-use anything. We can tweak this
3873 * later to try and re-use notification packets and SKBs that
3874 * fail to Rx correctly */
3875 if (rxb->skb != NULL) {
3876 priv->alloc_rxb_skb--;
3877 dev_kfree_skb_any(rxb->skb);
3878 rxb->skb = NULL;
3879 }
3880
3881 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
3882 IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3883 spin_lock_irqsave(&rxq->lock, flags);
3884 list_add_tail(&rxb->list, &priv->rxq.rx_used);
3885 spin_unlock_irqrestore(&rxq->lock, flags);
3886 i = (i + 1) & RX_QUEUE_MASK;
Mohamed Abbas5c0eef92007-11-29 11:10:14 +08003887 /* If there are a lot of unused frames,
3888 * restock the Rx queue so ucode won't assert. */
3889 if (fill_rx) {
3890 count++;
3891 if (count >= 8) {
3892 priv->rxq.read = i;
3893 __iwl3945_rx_replenish(priv);
3894 count = 0;
3895 }
3896 }
Zhu Yib481de92007-09-25 17:54:57 -07003897 }
3898
3899 /* Backtrack one entry */
3900 priv->rxq.read = i;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003901 iwl3945_rx_queue_restock(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003902}
3903
Cahill, Ben M6440adb2007-11-29 11:09:55 +08003904/**
3905 * iwl3945_tx_queue_update_write_ptr - Send new write index to hardware
3906 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003907static int iwl3945_tx_queue_update_write_ptr(struct iwl3945_priv *priv,
3908 struct iwl3945_tx_queue *txq)
Zhu Yib481de92007-09-25 17:54:57 -07003909{
3910 u32 reg = 0;
3911 int rc = 0;
3912 int txq_id = txq->q.id;
3913
3914 if (txq->need_update == 0)
3915 return rc;
3916
3917 /* if we're trying to save power */
3918 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
3919 /* wake up nic if it's powered down ...
3920 * uCode will wake up, and interrupt us again, so next
3921 * time we'll skip this part. */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003922 reg = iwl3945_read32(priv, CSR_UCODE_DRV_GP1);
Zhu Yib481de92007-09-25 17:54:57 -07003923
3924 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
3925 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003926 iwl3945_set_bit(priv, CSR_GP_CNTRL,
Zhu Yib481de92007-09-25 17:54:57 -07003927 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3928 return rc;
3929 }
3930
3931 /* restore this queue's parameters in nic hardware. */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003932 rc = iwl3945_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003933 if (rc)
3934 return rc;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003935 iwl3945_write_direct32(priv, HBUS_TARG_WRPTR,
Tomas Winklerfc4b6852007-10-25 17:15:24 +08003936 txq->q.write_ptr | (txq_id << 8));
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003937 iwl3945_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003938
3939 /* else not in power-save mode, uCode will never sleep when we're
3940 * trying to tx (during RFKILL, we're not trying to tx). */
3941 } else
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003942 iwl3945_write32(priv, HBUS_TARG_WRPTR,
Tomas Winklerfc4b6852007-10-25 17:15:24 +08003943 txq->q.write_ptr | (txq_id << 8));
Zhu Yib481de92007-09-25 17:54:57 -07003944
3945 txq->need_update = 0;
3946
3947 return rc;
3948}
3949
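/*
 * Illustrative encoding example (not part of the original driver): the value
 * written to HBUS_TARG_WRPTR above is (write_ptr | (txq_id << 8)), so e.g.
 * queue 5 with write index 12 is written as 0x50c.
 */
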
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003950#ifdef CONFIG_IWL3945_DEBUG
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08003951static void iwl3945_print_rx_config_cmd(struct iwl3945_priv *priv,
3952 struct iwl3945_rxon_cmd *rxon)
Zhu Yib481de92007-09-25 17:54:57 -07003953{
3954 IWL_DEBUG_RADIO("RX CONFIG:\n");
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08003955 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
Zhu Yib481de92007-09-25 17:54:57 -07003956 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
3957 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
3958 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
3959 le32_to_cpu(rxon->filter_flags));
3960 IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
3961 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
3962 rxon->ofdm_basic_rates);
3963 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
Johannes Berge1749612008-10-27 15:59:26 -07003964 IWL_DEBUG_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
3965 IWL_DEBUG_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
Zhu Yib481de92007-09-25 17:54:57 -07003966 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
3967}
3968#endif
3969
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003970static void iwl3945_enable_interrupts(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003971{
3972 IWL_DEBUG_ISR("Enabling interrupts\n");
3973 set_bit(STATUS_INT_ENABLED, &priv->status);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003974 iwl3945_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
Zhu Yib481de92007-09-25 17:54:57 -07003975}
3976
Mohamed Abbas0359fac2008-03-28 16:21:08 -07003977
3978/* call this function to flush any scheduled tasklet */
3979static inline void iwl_synchronize_irq(struct iwl3945_priv *priv)
3980{
Tomas Winklera96a27f2008-10-23 23:48:56 -07003981	/* wait to make sure we flush the pending tasklet */
Mohamed Abbas0359fac2008-03-28 16:21:08 -07003982 synchronize_irq(priv->pci_dev->irq);
3983 tasklet_kill(&priv->irq_tasklet);
3984}
3985
3986
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003987static inline void iwl3945_disable_interrupts(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003988{
3989 clear_bit(STATUS_INT_ENABLED, &priv->status);
3990
3991 /* disable interrupts from uCode/NIC to host */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003992 iwl3945_write32(priv, CSR_INT_MASK, 0x00000000);
Zhu Yib481de92007-09-25 17:54:57 -07003993
3994 /* acknowledge/clear/reset any interrupts still pending
3995 * from uCode or flow handler (Rx/Tx DMA) */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003996 iwl3945_write32(priv, CSR_INT, 0xffffffff);
3997 iwl3945_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
Zhu Yib481de92007-09-25 17:54:57 -07003998 IWL_DEBUG_ISR("Disabled interrupts\n");
3999}
4000
4001static const char *desc_lookup(int i)
4002{
4003 switch (i) {
4004 case 1:
4005 return "FAIL";
4006 case 2:
4007 return "BAD_PARAM";
4008 case 3:
4009 return "BAD_CHECKSUM";
4010 case 4:
4011 return "NMI_INTERRUPT";
4012 case 5:
4013 return "SYSASSERT";
4014 case 6:
4015 return "FATAL_ERROR";
4016 }
4017
4018 return "UNKNOWN";
4019}
4020
4021#define ERROR_START_OFFSET (1 * sizeof(u32))
4022#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4023
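/*
 * Layout note (derived from the loop below): each error-log entry is seven
 * u32s -- desc, time, blink1, blink2, ilink1, ilink2, data1 -- which is where
 * ERROR_ELEM_SIZE (7 * sizeof(u32)) comes from.  The first u32 of the table
 * (hence ERROR_START_OFFSET) holds the entry count.
 */
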
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004024static void iwl3945_dump_nic_error_log(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07004025{
4026 u32 i;
4027 u32 desc, time, count, base, data1;
4028 u32 blink1, blink2, ilink1, ilink2;
4029 int rc;
4030
4031 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4032
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004033 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
Zhu Yib481de92007-09-25 17:54:57 -07004034 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4035 return;
4036 }
4037
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004038 rc = iwl3945_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004039 if (rc) {
4040 IWL_WARNING("Can not read from adapter at this time.\n");
4041 return;
4042 }
4043
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004044 count = iwl3945_read_targ_mem(priv, base);
Zhu Yib481de92007-09-25 17:54:57 -07004045
4046 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4047 IWL_ERROR("Start IWL Error Log Dump:\n");
Tomas Winkler2acae162008-03-02 01:25:59 +02004048 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count);
Zhu Yib481de92007-09-25 17:54:57 -07004049 }
4050
4051 IWL_ERROR("Desc Time asrtPC blink2 "
4052 "ilink1 nmiPC Line\n");
4053 for (i = ERROR_START_OFFSET;
4054 i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
4055 i += ERROR_ELEM_SIZE) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004056 desc = iwl3945_read_targ_mem(priv, base + i);
Zhu Yib481de92007-09-25 17:54:57 -07004057 time =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004058 iwl3945_read_targ_mem(priv, base + i + 1 * sizeof(u32));
Zhu Yib481de92007-09-25 17:54:57 -07004059 blink1 =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004060 iwl3945_read_targ_mem(priv, base + i + 2 * sizeof(u32));
Zhu Yib481de92007-09-25 17:54:57 -07004061 blink2 =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004062 iwl3945_read_targ_mem(priv, base + i + 3 * sizeof(u32));
Zhu Yib481de92007-09-25 17:54:57 -07004063 ilink1 =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004064 iwl3945_read_targ_mem(priv, base + i + 4 * sizeof(u32));
Zhu Yib481de92007-09-25 17:54:57 -07004065 ilink2 =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004066 iwl3945_read_targ_mem(priv, base + i + 5 * sizeof(u32));
Zhu Yib481de92007-09-25 17:54:57 -07004067 data1 =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004068 iwl3945_read_targ_mem(priv, base + i + 6 * sizeof(u32));
Zhu Yib481de92007-09-25 17:54:57 -07004069
4070 IWL_ERROR
4071 ("%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
4072 desc_lookup(desc), desc, time, blink1, blink2,
4073 ilink1, ilink2, data1);
4074 }
4075
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004076 iwl3945_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004077
4078}
4079
Ben Cahillf58177b2007-11-29 11:09:43 +08004080#define EVENT_START_OFFSET (6 * sizeof(u32))
Zhu Yib481de92007-09-25 17:54:57 -07004081
4082/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004083 * iwl3945_print_event_log - Dump error event log to syslog
Zhu Yib481de92007-09-25 17:54:57 -07004084 *
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004085 * NOTE: Must be called with iwl3945_grab_nic_access() already obtained!
Zhu Yib481de92007-09-25 17:54:57 -07004086 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004087static void iwl3945_print_event_log(struct iwl3945_priv *priv, u32 start_idx,
Zhu Yib481de92007-09-25 17:54:57 -07004088 u32 num_events, u32 mode)
4089{
4090 u32 i;
4091 u32 base; /* SRAM byte address of event log header */
4092 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4093 u32 ptr; /* SRAM byte address of log data */
4094 u32 ev, time, data; /* event log data */
4095
4096 if (num_events == 0)
4097 return;
4098
4099 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4100
4101 if (mode == 0)
4102 event_size = 2 * sizeof(u32);
4103 else
4104 event_size = 3 * sizeof(u32);
4105
4106 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4107
4108 /* "time" is actually "data" for mode 0 (no timestamp).
4109 * place event id # at far right for easier visual parsing. */
4110 for (i = 0; i < num_events; i++) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004111 ev = iwl3945_read_targ_mem(priv, ptr);
Zhu Yib481de92007-09-25 17:54:57 -07004112 ptr += sizeof(u32);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004113 time = iwl3945_read_targ_mem(priv, ptr);
Zhu Yib481de92007-09-25 17:54:57 -07004114 ptr += sizeof(u32);
4115 if (mode == 0)
4116 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4117 else {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004118 data = iwl3945_read_targ_mem(priv, ptr);
Zhu Yib481de92007-09-25 17:54:57 -07004119 ptr += sizeof(u32);
4120 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4121 }
4122 }
4123}
4124
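/*
 * Layout note (derived from the loop above): each log entry is two u32s
 * (event id, data) when mode == 0, or three u32s (event id, timestamp, data)
 * when the uCode records timestamps.
 */
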
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004125static void iwl3945_dump_nic_event_log(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07004126{
4127 int rc;
4128 u32 base; /* SRAM byte address of event log header */
4129 u32 capacity; /* event log capacity in # entries */
4130 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
4131 u32 num_wraps; /* # times uCode wrapped to top of log */
4132 u32 next_entry; /* index of next entry to be written by uCode */
4133 u32 size; /* # entries that we'll print */
4134
4135 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004136 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
Zhu Yib481de92007-09-25 17:54:57 -07004137 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4138 return;
4139 }
4140
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004141 rc = iwl3945_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004142 if (rc) {
4143 IWL_WARNING("Can not read from adapter at this time.\n");
4144 return;
4145 }
4146
4147 /* event log header */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004148 capacity = iwl3945_read_targ_mem(priv, base);
4149 mode = iwl3945_read_targ_mem(priv, base + (1 * sizeof(u32)));
4150 num_wraps = iwl3945_read_targ_mem(priv, base + (2 * sizeof(u32)));
4151 next_entry = iwl3945_read_targ_mem(priv, base + (3 * sizeof(u32)));
Zhu Yib481de92007-09-25 17:54:57 -07004152
4153 size = num_wraps ? capacity : next_entry;
4154
4155 /* bail out if nothing in log */
4156 if (size == 0) {
Zhu Yi583fab32007-09-27 11:27:30 +08004157 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004158 iwl3945_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004159 return;
4160 }
4161
Zhu Yi583fab32007-09-27 11:27:30 +08004162 IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
Zhu Yib481de92007-09-25 17:54:57 -07004163 size, num_wraps);
4164
4165 /* if uCode has wrapped back to top of log, start at the oldest entry,
 4166	 * i.e. the next one that uCode would fill. */
4167 if (num_wraps)
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004168 iwl3945_print_event_log(priv, next_entry,
Zhu Yib481de92007-09-25 17:54:57 -07004169 capacity - next_entry, mode);
4170
4171 /* (then/else) start at top of log */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004172 iwl3945_print_event_log(priv, 0, next_entry, mode);
Zhu Yib481de92007-09-25 17:54:57 -07004173
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004174 iwl3945_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004175}
4176
4177/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004178 * iwl3945_irq_handle_error - called for HW or SW error interrupt from card
Zhu Yib481de92007-09-25 17:54:57 -07004179 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004180static void iwl3945_irq_handle_error(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07004181{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004182 /* Set the FW error flag -- cleared on iwl3945_down */
Zhu Yib481de92007-09-25 17:54:57 -07004183 set_bit(STATUS_FW_ERROR, &priv->status);
4184
4185 /* Cancel currently queued command. */
4186 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4187
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004188#ifdef CONFIG_IWL3945_DEBUG
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08004189 if (priv->debug_level & IWL_DL_FW_ERRORS) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004190 iwl3945_dump_nic_error_log(priv);
4191 iwl3945_dump_nic_event_log(priv);
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08004192 iwl3945_print_rx_config_cmd(priv, &priv->staging_rxon);
Zhu Yib481de92007-09-25 17:54:57 -07004193 }
4194#endif
4195
4196 wake_up_interruptible(&priv->wait_command_queue);
4197
4198 /* Keep the restart process from trying to send host
4199 * commands by clearing the INIT status bit */
4200 clear_bit(STATUS_READY, &priv->status);
4201
4202 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4203 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
4204 "Restarting adapter due to uCode error.\n");
4205
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004206 if (iwl3945_is_associated(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07004207 memcpy(&priv->recovery_rxon, &priv->active_rxon,
4208 sizeof(priv->recovery_rxon));
4209 priv->error_recovering = 1;
4210 }
4211 queue_work(priv->workqueue, &priv->restart);
4212 }
4213}
4214
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004215static void iwl3945_error_recovery(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07004216{
4217 unsigned long flags;
4218
4219 memcpy(&priv->staging_rxon, &priv->recovery_rxon,
4220 sizeof(priv->staging_rxon));
4221 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004222 iwl3945_commit_rxon(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004223
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004224 iwl3945_add_station(priv, priv->bssid, 1, 0);
Zhu Yib481de92007-09-25 17:54:57 -07004225
4226 spin_lock_irqsave(&priv->lock, flags);
4227 priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
4228 priv->error_recovering = 0;
4229 spin_unlock_irqrestore(&priv->lock, flags);
4230}
4231
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004232static void iwl3945_irq_tasklet(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07004233{
4234 u32 inta, handled = 0;
4235 u32 inta_fh;
4236 unsigned long flags;
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004237#ifdef CONFIG_IWL3945_DEBUG
Zhu Yib481de92007-09-25 17:54:57 -07004238 u32 inta_mask;
4239#endif
4240
4241 spin_lock_irqsave(&priv->lock, flags);
4242
4243 /* Ack/clear/reset pending uCode interrupts.
4244 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
4245 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004246 inta = iwl3945_read32(priv, CSR_INT);
4247 iwl3945_write32(priv, CSR_INT, inta);
Zhu Yib481de92007-09-25 17:54:57 -07004248
4249 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
4250 * Any new interrupts that happen after this, either while we're
4251 * in this tasklet, or later, will show up in next ISR/tasklet. */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004252 inta_fh = iwl3945_read32(priv, CSR_FH_INT_STATUS);
4253 iwl3945_write32(priv, CSR_FH_INT_STATUS, inta_fh);
Zhu Yib481de92007-09-25 17:54:57 -07004254
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004255#ifdef CONFIG_IWL3945_DEBUG
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08004256 if (priv->debug_level & IWL_DL_ISR) {
Ben Cahill9fbab512007-11-29 11:09:47 +08004257 /* just for debug */
4258 inta_mask = iwl3945_read32(priv, CSR_INT_MASK);
Zhu Yib481de92007-09-25 17:54:57 -07004259 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4260 inta, inta_mask, inta_fh);
4261 }
4262#endif
4263
4264 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
4265 * atomic, make sure that inta covers all the interrupts that
4266 * we've discovered, even if FH interrupt came in just after
4267 * reading CSR_INT. */
Tomas Winkler6f83eaa2008-03-04 18:09:28 -08004268 if (inta_fh & CSR39_FH_INT_RX_MASK)
Zhu Yib481de92007-09-25 17:54:57 -07004269 inta |= CSR_INT_BIT_FH_RX;
Tomas Winkler6f83eaa2008-03-04 18:09:28 -08004270 if (inta_fh & CSR39_FH_INT_TX_MASK)
Zhu Yib481de92007-09-25 17:54:57 -07004271 inta |= CSR_INT_BIT_FH_TX;
4272
4273 /* Now service all interrupt bits discovered above. */
4274 if (inta & CSR_INT_BIT_HW_ERR) {
4275 IWL_ERROR("Microcode HW error detected. Restarting.\n");
4276
4277 /* Tell the device to stop sending interrupts */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004278 iwl3945_disable_interrupts(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004279
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004280 iwl3945_irq_handle_error(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004281
4282 handled |= CSR_INT_BIT_HW_ERR;
4283
4284 spin_unlock_irqrestore(&priv->lock, flags);
4285
4286 return;
4287 }
4288
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004289#ifdef CONFIG_IWL3945_DEBUG
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08004290 if (priv->debug_level & (IWL_DL_ISR)) {
Zhu Yib481de92007-09-25 17:54:57 -07004291 /* NIC fires this, but we don't use it, redundant with WAKEUP */
Joonwoo Park25c03d82008-01-23 10:15:20 -08004292 if (inta & CSR_INT_BIT_SCD)
4293 IWL_DEBUG_ISR("Scheduler finished to transmit "
4294 "the frame/frames.\n");
Zhu Yib481de92007-09-25 17:54:57 -07004295
4296 /* Alive notification via Rx interrupt will do the real work */
4297 if (inta & CSR_INT_BIT_ALIVE)
4298 IWL_DEBUG_ISR("Alive interrupt\n");
4299 }
4300#endif
4301 /* Safely ignore these bits for debug checks below */
Joonwoo Park25c03d82008-01-23 10:15:20 -08004302 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
Zhu Yib481de92007-09-25 17:54:57 -07004303
Zhu Yib481de92007-09-25 17:54:57 -07004304 /* Error detected by uCode */
4305 if (inta & CSR_INT_BIT_SW_ERR) {
4306 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n",
4307 inta);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004308 iwl3945_irq_handle_error(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004309 handled |= CSR_INT_BIT_SW_ERR;
4310 }
4311
4312 /* uCode wakes up after power-down sleep */
4313 if (inta & CSR_INT_BIT_WAKEUP) {
4314 IWL_DEBUG_ISR("Wakeup interrupt\n");
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004315 iwl3945_rx_queue_update_write_ptr(priv, &priv->rxq);
4316 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[0]);
4317 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[1]);
4318 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[2]);
4319 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[3]);
4320 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[4]);
4321 iwl3945_tx_queue_update_write_ptr(priv, &priv->txq[5]);
Zhu Yib481de92007-09-25 17:54:57 -07004322
4323 handled |= CSR_INT_BIT_WAKEUP;
4324 }
4325
4326 /* All uCode command responses, including Tx command responses,
4327 * Rx "responses" (frame-received notification), and other
 4328	 * notifications from uCode come through here */
4329 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004330 iwl3945_rx_handle(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004331 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
4332 }
4333
4334 if (inta & CSR_INT_BIT_FH_TX) {
4335 IWL_DEBUG_ISR("Tx interrupt\n");
4336
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004337 iwl3945_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
4338 if (!iwl3945_grab_nic_access(priv)) {
Tomas Winklerbddadf82008-12-19 10:37:01 +08004339 iwl3945_write_direct32(priv, FH39_TCSR_CREDIT
4340 (FH39_SRVC_CHNL), 0x0);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004341 iwl3945_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004342 }
4343 handled |= CSR_INT_BIT_FH_TX;
4344 }
4345
4346 if (inta & ~handled)
4347 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
4348
4349 if (inta & ~CSR_INI_SET_MASK) {
4350 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
4351 inta & ~CSR_INI_SET_MASK);
4352 IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh);
4353 }
4354
4355 /* Re-enable all interrupts */
Mohamed Abbas0359fac2008-03-28 16:21:08 -07004356 /* only Re-enable if disabled by irq */
4357 if (test_bit(STATUS_INT_ENABLED, &priv->status))
4358 iwl3945_enable_interrupts(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004359
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004360#ifdef CONFIG_IWL3945_DEBUG
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08004361 if (priv->debug_level & (IWL_DL_ISR)) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004362 inta = iwl3945_read32(priv, CSR_INT);
4363 inta_mask = iwl3945_read32(priv, CSR_INT_MASK);
4364 inta_fh = iwl3945_read32(priv, CSR_FH_INT_STATUS);
Zhu Yib481de92007-09-25 17:54:57 -07004365 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
4366 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
4367 }
4368#endif
4369 spin_unlock_irqrestore(&priv->lock, flags);
4370}
4371
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004372static irqreturn_t iwl3945_isr(int irq, void *data)
Zhu Yib481de92007-09-25 17:54:57 -07004373{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004374 struct iwl3945_priv *priv = data;
Zhu Yib481de92007-09-25 17:54:57 -07004375 u32 inta, inta_mask;
4376 u32 inta_fh;
4377 if (!priv)
4378 return IRQ_NONE;
4379
4380 spin_lock(&priv->lock);
4381
4382 /* Disable (but don't clear!) interrupts here to avoid
4383 * back-to-back ISRs and sporadic interrupts from our NIC.
4384 * If we have something to service, the tasklet will re-enable ints.
4385 * If we *don't* have something, we'll re-enable before leaving here. */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004386 inta_mask = iwl3945_read32(priv, CSR_INT_MASK); /* just for debug */
4387 iwl3945_write32(priv, CSR_INT_MASK, 0x00000000);
Zhu Yib481de92007-09-25 17:54:57 -07004388
4389 /* Discover which interrupts are active/pending */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004390 inta = iwl3945_read32(priv, CSR_INT);
4391 inta_fh = iwl3945_read32(priv, CSR_FH_INT_STATUS);
Zhu Yib481de92007-09-25 17:54:57 -07004392
4393 /* Ignore interrupt if there's nothing in NIC to service.
4394 * This may be due to IRQ shared with another device,
4395 * or due to sporadic interrupts thrown from our NIC. */
4396 if (!inta && !inta_fh) {
4397 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
4398 goto none;
4399 }
4400
4401 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
4402 /* Hardware disappeared */
Zhu Yi99df6302008-12-02 12:13:58 -08004403 IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta);
Oliver Neukumcb4da1a2007-11-13 21:10:32 -08004404 goto unplugged;
Zhu Yib481de92007-09-25 17:54:57 -07004405 }
4406
4407 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4408 inta, inta_mask, inta_fh);
4409
Joonwoo Park25c03d82008-01-23 10:15:20 -08004410 inta &= ~CSR_INT_BIT_SCD;
4411
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004412 /* iwl3945_irq_tasklet() will service interrupts and re-enable them */
Joonwoo Park25c03d82008-01-23 10:15:20 -08004413 if (likely(inta || inta_fh))
4414 tasklet_schedule(&priv->irq_tasklet);
Oliver Neukumcb4da1a2007-11-13 21:10:32 -08004415unplugged:
Zhu Yib481de92007-09-25 17:54:57 -07004416 spin_unlock(&priv->lock);
4417
4418 return IRQ_HANDLED;
4419
4420 none:
4421 /* re-enable interrupts here since we don't have anything to service. */
Mohamed Abbas0359fac2008-03-28 16:21:08 -07004422 /* only Re-enable if disabled by irq */
4423 if (test_bit(STATUS_INT_ENABLED, &priv->status))
4424 iwl3945_enable_interrupts(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004425 spin_unlock(&priv->lock);
4426 return IRQ_NONE;
4427}
4428
4429/************************** EEPROM BANDS ****************************
4430 *
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004431 * The iwl3945_eeprom_band definitions below provide the mapping from the
Zhu Yib481de92007-09-25 17:54:57 -07004432 * EEPROM contents to the specific channel number supported for each
4433 * band.
4434 *
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004435 * For example, iwl3945_priv->eeprom.band_3_channels[4] from the band_3
Zhu Yib481de92007-09-25 17:54:57 -07004436 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
4437 * The specific geography and calibration information for that channel
4438 * is contained in the eeprom map itself.
4439 *
4440 * During init, we copy the eeprom information and channel map
4441 * information into priv->channel_info_24/52 and priv->channel_map_24/52
4442 *
4443 * channel_map_24/52 provides the index in the channel_info array for a
4444 * given channel. We have to have two separate maps as there is channel
4445 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
4446 * band_2
4447 *
4448 * A value of 0xff stored in the channel_map indicates that the channel
4449 * is not supported by the hardware at all.
4450 *
4451 * A value of 0xfe in the channel_map indicates that the channel is not
4452 * valid for Tx with the current hardware. This means that
4453 * while the system can tune and receive on a given channel, it may not
4454 * be able to associate or transmit any frames on that
4455 * channel. There is no corresponding channel information for that
4456 * entry.
4457 *
4458 *********************************************************************/
4459
4460/* 2.4 GHz */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004461static const u8 iwl3945_eeprom_band_1[14] = {
Zhu Yib481de92007-09-25 17:54:57 -07004462 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4463};
4464
4465/* 5.2 GHz bands */
Ben Cahill9fbab512007-11-29 11:09:47 +08004466static const u8 iwl3945_eeprom_band_2[] = { /* 4915-5080MHz */
Zhu Yib481de92007-09-25 17:54:57 -07004467 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4468};
4469
Ben Cahill9fbab512007-11-29 11:09:47 +08004470static const u8 iwl3945_eeprom_band_3[] = { /* 5170-5320MHz */
Zhu Yib481de92007-09-25 17:54:57 -07004471 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4472};
4473
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004474static const u8 iwl3945_eeprom_band_4[] = { /* 5500-5700MHz */
Zhu Yib481de92007-09-25 17:54:57 -07004475 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4476};
4477
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004478static const u8 iwl3945_eeprom_band_5[] = { /* 5725-5825MHz */
Zhu Yib481de92007-09-25 17:54:57 -07004479 145, 149, 153, 157, 161, 165
4480};
4481
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004482static void iwl3945_init_band_reference(const struct iwl3945_priv *priv, int band,
Zhu Yib481de92007-09-25 17:54:57 -07004483 int *eeprom_ch_count,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004484 const struct iwl3945_eeprom_channel
Zhu Yib481de92007-09-25 17:54:57 -07004485 **eeprom_ch_info,
4486 const u8 **eeprom_ch_index)
4487{
4488 switch (band) {
4489 case 1: /* 2.4GHz band */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004490 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_1);
Zhu Yib481de92007-09-25 17:54:57 -07004491 *eeprom_ch_info = priv->eeprom.band_1_channels;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004492 *eeprom_ch_index = iwl3945_eeprom_band_1;
Zhu Yib481de92007-09-25 17:54:57 -07004493 break;
Ben Cahill9fbab512007-11-29 11:09:47 +08004494 case 2: /* 4.9GHz band */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004495 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_2);
Zhu Yib481de92007-09-25 17:54:57 -07004496 *eeprom_ch_info = priv->eeprom.band_2_channels;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004497 *eeprom_ch_index = iwl3945_eeprom_band_2;
Zhu Yib481de92007-09-25 17:54:57 -07004498 break;
4499 case 3: /* 5.2GHz band */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004500 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_3);
Zhu Yib481de92007-09-25 17:54:57 -07004501 *eeprom_ch_info = priv->eeprom.band_3_channels;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004502 *eeprom_ch_index = iwl3945_eeprom_band_3;
Zhu Yib481de92007-09-25 17:54:57 -07004503 break;
Ben Cahill9fbab512007-11-29 11:09:47 +08004504 case 4: /* 5.5GHz band */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004505 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_4);
Zhu Yib481de92007-09-25 17:54:57 -07004506 *eeprom_ch_info = priv->eeprom.band_4_channels;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004507 *eeprom_ch_index = iwl3945_eeprom_band_4;
Zhu Yib481de92007-09-25 17:54:57 -07004508 break;
Ben Cahill9fbab512007-11-29 11:09:47 +08004509 case 5: /* 5.7GHz band */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004510 *eeprom_ch_count = ARRAY_SIZE(iwl3945_eeprom_band_5);
Zhu Yib481de92007-09-25 17:54:57 -07004511 *eeprom_ch_info = priv->eeprom.band_5_channels;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004512 *eeprom_ch_index = iwl3945_eeprom_band_5;
Zhu Yib481de92007-09-25 17:54:57 -07004513 break;
4514 default:
4515 BUG();
4516 return;
4517 }
4518}
4519
Cahill, Ben M6440adb2007-11-29 11:09:55 +08004520/**
4521 * iwl3945_get_channel_info - Find driver's private channel info
4522 *
4523 * Based on band and channel number.
4524 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004525const struct iwl3945_channel_info *iwl3945_get_channel_info(const struct iwl3945_priv *priv,
Johannes Berg8318d782008-01-24 19:38:38 +01004526 enum ieee80211_band band, u16 channel)
Zhu Yib481de92007-09-25 17:54:57 -07004527{
4528 int i;
4529
Johannes Berg8318d782008-01-24 19:38:38 +01004530 switch (band) {
4531 case IEEE80211_BAND_5GHZ:
Zhu Yib481de92007-09-25 17:54:57 -07004532 for (i = 14; i < priv->channel_count; i++) {
4533 if (priv->channel_info[i].channel == channel)
4534 return &priv->channel_info[i];
4535 }
4536 break;
4537
Johannes Berg8318d782008-01-24 19:38:38 +01004538 case IEEE80211_BAND_2GHZ:
Zhu Yib481de92007-09-25 17:54:57 -07004539 if (channel >= 1 && channel <= 14)
4540 return &priv->channel_info[channel - 1];
4541 break;
Johannes Berg8318d782008-01-24 19:38:38 +01004542 case IEEE80211_NUM_BANDS:
4543 WARN_ON(1);
Zhu Yib481de92007-09-25 17:54:57 -07004544 }
4545
4546 return NULL;
4547}
4548
4549#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
4550 ? # x " " : "")
4551
Cahill, Ben M6440adb2007-11-29 11:09:55 +08004552/**
4553 * iwl3945_init_channel_map - Set up driver's info for all possible channels
4554 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004555static int iwl3945_init_channel_map(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07004556{
4557 int eeprom_ch_count = 0;
4558 const u8 *eeprom_ch_index = NULL;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004559 const struct iwl3945_eeprom_channel *eeprom_ch_info = NULL;
Zhu Yib481de92007-09-25 17:54:57 -07004560 int band, ch;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004561 struct iwl3945_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07004562
4563 if (priv->channel_count) {
4564 IWL_DEBUG_INFO("Channel map already initialized.\n");
4565 return 0;
4566 }
4567
4568 if (priv->eeprom.version < 0x2f) {
4569 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
4570 priv->eeprom.version);
4571 return -EINVAL;
4572 }
4573
4574 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
4575
4576 priv->channel_count =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004577 ARRAY_SIZE(iwl3945_eeprom_band_1) +
4578 ARRAY_SIZE(iwl3945_eeprom_band_2) +
4579 ARRAY_SIZE(iwl3945_eeprom_band_3) +
4580 ARRAY_SIZE(iwl3945_eeprom_band_4) +
4581 ARRAY_SIZE(iwl3945_eeprom_band_5);
Zhu Yib481de92007-09-25 17:54:57 -07004582
4583 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
4584
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004585 priv->channel_info = kzalloc(sizeof(struct iwl3945_channel_info) *
Zhu Yib481de92007-09-25 17:54:57 -07004586 priv->channel_count, GFP_KERNEL);
4587 if (!priv->channel_info) {
4588 IWL_ERROR("Could not allocate channel_info\n");
4589 priv->channel_count = 0;
4590 return -ENOMEM;
4591 }
4592
4593 ch_info = priv->channel_info;
4594
4595 /* Loop through the 5 EEPROM bands adding them in order to the
 4596	 * channel map we maintain (which contains additional information
 4597	 * beyond what is in the EEPROM itself) */
4598 for (band = 1; band <= 5; band++) {
4599
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004600 iwl3945_init_band_reference(priv, band, &eeprom_ch_count,
Zhu Yib481de92007-09-25 17:54:57 -07004601 &eeprom_ch_info, &eeprom_ch_index);
4602
4603 /* Loop through each band adding each of the channels */
4604 for (ch = 0; ch < eeprom_ch_count; ch++) {
4605 ch_info->channel = eeprom_ch_index[ch];
Johannes Berg8318d782008-01-24 19:38:38 +01004606 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
4607 IEEE80211_BAND_5GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07004608
4609 /* permanently store EEPROM's channel regulatory flags
4610 * and max power in channel info database. */
4611 ch_info->eeprom = eeprom_ch_info[ch];
4612
4613 /* Copy the run-time flags so they are there even on
4614 * invalid channels */
4615 ch_info->flags = eeprom_ch_info[ch].flags;
4616
4617 if (!(is_channel_valid(ch_info))) {
4618 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
4619 "No traffic\n",
4620 ch_info->channel,
4621 ch_info->flags,
4622 is_channel_a_band(ch_info) ?
4623 "5.2" : "2.4");
4624 ch_info++;
4625 continue;
4626 }
4627
4628 /* Initialize regulatory-based run-time data */
4629 ch_info->max_power_avg = ch_info->curr_txpow =
4630 eeprom_ch_info[ch].max_power_avg;
4631 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
4632 ch_info->min_power = 0;
4633
Guy Cohenfe7c4042008-04-21 15:41:56 -07004634 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
Zhu Yib481de92007-09-25 17:54:57 -07004635 " %ddBm): Ad-Hoc %ssupported\n",
4636 ch_info->channel,
4637 is_channel_a_band(ch_info) ?
4638 "5.2" : "2.4",
Tomas Winkler8211ef72008-03-02 01:36:04 +02004639 CHECK_AND_PRINT(VALID),
Zhu Yib481de92007-09-25 17:54:57 -07004640 CHECK_AND_PRINT(IBSS),
4641 CHECK_AND_PRINT(ACTIVE),
4642 CHECK_AND_PRINT(RADAR),
4643 CHECK_AND_PRINT(WIDE),
Zhu Yib481de92007-09-25 17:54:57 -07004644 CHECK_AND_PRINT(DFS),
4645 eeprom_ch_info[ch].flags,
4646 eeprom_ch_info[ch].max_power_avg,
4647 ((eeprom_ch_info[ch].
4648 flags & EEPROM_CHANNEL_IBSS)
4649 && !(eeprom_ch_info[ch].
4650 flags & EEPROM_CHANNEL_RADAR))
4651 ? "" : "not ");
4652
4653 /* Set the user_txpower_limit to the highest power
4654 * supported by any channel */
4655 if (eeprom_ch_info[ch].max_power_avg >
4656 priv->user_txpower_limit)
4657 priv->user_txpower_limit =
4658 eeprom_ch_info[ch].max_power_avg;
4659
4660 ch_info++;
4661 }
4662 }
4663
Cahill, Ben M6440adb2007-11-29 11:09:55 +08004664 /* Set up txpower settings in driver for all channels */
Zhu Yib481de92007-09-25 17:54:57 -07004665 if (iwl3945_txpower_set_from_eeprom(priv))
4666 return -EIO;
4667
4668 return 0;
4669}
4670
Reinette Chatre849e0dc2008-01-23 10:15:18 -08004671/*
4672 * iwl3945_free_channel_map - undo allocations in iwl3945_init_channel_map
4673 */
4674static void iwl3945_free_channel_map(struct iwl3945_priv *priv)
4675{
4676 kfree(priv->channel_info);
4677 priv->channel_count = 0;
4678}
4679
Zhu Yib481de92007-09-25 17:54:57 -07004680/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
4681 * sending probe req. This should be set long enough to hear probe responses
4682 * from more than one AP. */
Abhijeet Kolekarf9340522008-09-03 11:26:58 +08004683#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
4684#define IWL_ACTIVE_DWELL_TIME_52 (20)
4685
4686#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
4687#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
Zhu Yib481de92007-09-25 17:54:57 -07004688
4689/* For faster active scanning, scan will move to the next channel if fewer than
4690 * PLCP_QUIET_THRESH packets are heard on this channel within
4691 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
4692 * time if it's a quiet channel (nothing responded to our probe, and there's
4693 * no other traffic).
4694 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
4695#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
Abhijeet Kolekarf9340522008-09-03 11:26:58 +08004696#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(10) /* msec */
Zhu Yib481de92007-09-25 17:54:57 -07004697
4698/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
4699 * Must be set longer than active dwell time.
4700 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
4701#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
4702#define IWL_PASSIVE_DWELL_TIME_52 (10)
4703#define IWL_PASSIVE_DWELL_BASE (100)
4704#define IWL_CHANNEL_TUNE_TIME 5
4705
Kolekar, Abhijeete720ce92008-11-07 09:58:42 -08004706#define IWL_SCAN_PROBE_MASK(n) (BIT(n) | (BIT(n) - BIT(1)))
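/*
 * Illustrative expansion (not part of the original driver): the macro sets
 * bits 1..n, e.g. IWL_SCAN_PROBE_MASK(2) = BIT(2) | (BIT(2) - BIT(1))
 * = 0x4 | 0x2 = 0x6, i.e. direct-probe bits for probes 1 and 2.
 */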
Abhijeet Kolekarf9340522008-09-03 11:26:58 +08004707
Johannes Berg8318d782008-01-24 19:38:38 +01004708static inline u16 iwl3945_get_active_dwell_time(struct iwl3945_priv *priv,
Abhijeet Kolekarf9340522008-09-03 11:26:58 +08004709 enum ieee80211_band band,
4710 u8 n_probes)
Zhu Yib481de92007-09-25 17:54:57 -07004711{
Johannes Berg8318d782008-01-24 19:38:38 +01004712 if (band == IEEE80211_BAND_5GHZ)
Abhijeet Kolekarf9340522008-09-03 11:26:58 +08004713 return IWL_ACTIVE_DWELL_TIME_52 +
4714 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
Zhu Yib481de92007-09-25 17:54:57 -07004715 else
Abhijeet Kolekarf9340522008-09-03 11:26:58 +08004716 return IWL_ACTIVE_DWELL_TIME_24 +
4717 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
Zhu Yib481de92007-09-25 17:54:57 -07004718}
4719
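/*
 * Example values (illustrative): with n_probes = 2 the function above
 * returns 30 + 3 * 3 = 39 ms on 2.4 GHz and 20 + 2 * 3 = 26 ms on 5 GHz.
 */
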
Johannes Berg8318d782008-01-24 19:38:38 +01004720static u16 iwl3945_get_passive_dwell_time(struct iwl3945_priv *priv,
4721 enum ieee80211_band band)
Zhu Yib481de92007-09-25 17:54:57 -07004722{
Johannes Berg8318d782008-01-24 19:38:38 +01004723 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
Zhu Yib481de92007-09-25 17:54:57 -07004724 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
4725 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
4726
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004727 if (iwl3945_is_associated(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07004728 /* If we're associated, we clamp the maximum passive
4729 * dwell time to be 98% of the beacon interval (minus
4730 * 2 * channel tune time) */
4731 passive = priv->beacon_int;
4732 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
4733 passive = IWL_PASSIVE_DWELL_BASE;
4734 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
4735 }
4736
Zhu Yib481de92007-09-25 17:54:57 -07004737 return passive;
4738}
4739
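/*
 * Example (illustrative): unassociated on 2.4 GHz the passive dwell is
 * 100 + 20 = 120 ms.  When associated with beacon_int = 100, the clamp above
 * yields (100 * 98) / 100 - 2 * IWL_CHANNEL_TUNE_TIME = 88 ms.
 */
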
Johannes Berg8318d782008-01-24 19:38:38 +01004740static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4741 enum ieee80211_band band,
Abhijeet Kolekarf9340522008-09-03 11:26:58 +08004742 u8 is_active, u8 n_probes,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004743 struct iwl3945_scan_channel *scan_ch)
Zhu Yib481de92007-09-25 17:54:57 -07004744{
4745 const struct ieee80211_channel *channels = NULL;
Johannes Berg8318d782008-01-24 19:38:38 +01004746 const struct ieee80211_supported_band *sband;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004747 const struct iwl3945_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07004748 u16 passive_dwell = 0;
4749 u16 active_dwell = 0;
4750 int added, i;
4751
Johannes Berg8318d782008-01-24 19:38:38 +01004752 sband = iwl3945_get_band(priv, band);
4753 if (!sband)
Zhu Yib481de92007-09-25 17:54:57 -07004754 return 0;
4755
Johannes Berg8318d782008-01-24 19:38:38 +01004756 channels = sband->channels;
Zhu Yib481de92007-09-25 17:54:57 -07004757
Abhijeet Kolekarf9340522008-09-03 11:26:58 +08004758 active_dwell = iwl3945_get_active_dwell_time(priv, band, n_probes);
Johannes Berg8318d782008-01-24 19:38:38 +01004759 passive_dwell = iwl3945_get_passive_dwell_time(priv, band);
Zhu Yib481de92007-09-25 17:54:57 -07004760
Abhijeet Kolekar8f4807a2008-09-03 11:26:31 +08004761 if (passive_dwell <= active_dwell)
4762 passive_dwell = active_dwell + 1;
4763
Johannes Berg8318d782008-01-24 19:38:38 +01004764 for (i = 0, added = 0; i < sband->n_channels; i++) {
Johannes Berg182e2e62008-04-04 10:41:56 +02004765 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
4766 continue;
4767
Johannes Berg8318d782008-01-24 19:38:38 +01004768 scan_ch->channel = channels[i].hw_value;
Zhu Yib481de92007-09-25 17:54:57 -07004769
Johannes Berg8318d782008-01-24 19:38:38 +01004770 ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel);
Zhu Yib481de92007-09-25 17:54:57 -07004771 if (!is_channel_valid(ch_info)) {
Ron Rindjunsky66b50042008-06-25 16:46:31 +08004772 IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n",
Zhu Yib481de92007-09-25 17:54:57 -07004773 scan_ch->channel);
4774 continue;
4775 }
4776
Zhu Yib481de92007-09-25 17:54:57 -07004777 scan_ch->active_dwell = cpu_to_le16(active_dwell);
4778 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
Abhijeet Kolekar011a0332008-12-02 12:14:07 -08004779		/* If passive, set up for auto-switch
4780 * and use long active_dwell time.
4781 */
4782 if (!is_active || is_channel_passive(ch_info) ||
4783 (channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
4784 scan_ch->type = 0; /* passive */
4785 if (IWL_UCODE_API(priv->ucode_ver) == 1)
4786 scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
4787 } else {
4788 scan_ch->type = 1; /* active */
4789 }
4790
 4791		/* Set direct probe bits. These may be used either for active
 4792		 * scan channels (probes get sent right away),
 4793		 * or for passive channels (probes get sent only after
 4794		 * hearing a clear Rx packet). */
4795 if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
4796 if (n_probes)
4797 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
4798 } else {
4799 /* uCode v1 does not allow setting direct probe bits on
4800 * passive channel. */
4801 if ((scan_ch->type & 1) && n_probes)
4802 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
4803 }
Zhu Yib481de92007-09-25 17:54:57 -07004804
Ben Cahill9fbab512007-11-29 11:09:47 +08004805 /* Set txpower levels to defaults */
Zhu Yib481de92007-09-25 17:54:57 -07004806 scan_ch->tpc.dsp_atten = 110;
4807 /* scan_pwr_info->tpc.dsp_atten; */
4808
4809 /*scan_pwr_info->tpc.tx_gain; */
Johannes Berg8318d782008-01-24 19:38:38 +01004810 if (band == IEEE80211_BAND_5GHZ)
Zhu Yib481de92007-09-25 17:54:57 -07004811 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
4812 else {
4813 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
4814 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
Ben Cahill9fbab512007-11-29 11:09:47 +08004815 * power level:
Reinette Chatre8a1b0242008-01-14 17:46:25 -08004816 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
Zhu Yib481de92007-09-25 17:54:57 -07004817 */
4818 }
4819
4820 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
4821 scan_ch->channel,
4822 (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
4823 (scan_ch->type & 1) ?
4824 active_dwell : passive_dwell);
4825
4826 scan_ch++;
4827 added++;
4828 }
4829
 4830	IWL_DEBUG_SCAN("total channels to scan %d\n", added);
4831 return added;
4832}
4833
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004834static void iwl3945_init_hw_rates(struct iwl3945_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -07004835 struct ieee80211_rate *rates)
4836{
4837 int i;
4838
4839 for (i = 0; i < IWL_RATE_COUNT; i++) {
Johannes Berg8318d782008-01-24 19:38:38 +01004840 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
4841 rates[i].hw_value = i; /* Rate scaling will work on indexes */
4842 rates[i].hw_value_short = i;
4843 rates[i].flags = 0;
4844 if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
Zhu Yib481de92007-09-25 17:54:57 -07004845 /*
Johannes Berg8318d782008-01-24 19:38:38 +01004846 * If CCK != 1M then set short preamble rate flag.
Zhu Yib481de92007-09-25 17:54:57 -07004847 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004848 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
Johannes Berg8318d782008-01-24 19:38:38 +01004849 0 : IEEE80211_RATE_SHORT_PREAMBLE;
Zhu Yib481de92007-09-25 17:54:57 -07004850 }
Zhu Yib481de92007-09-25 17:54:57 -07004851 }
4852}
4853
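/*
 * Note (illustrative, assuming the usual IEEE rate encoding in the
 * iwl3945_rates[] table): mac80211 bitrates are in units of 100 kbps while
 * the .ieee field is in 802.11 units of 500 kbps, hence the "* 5" above --
 * e.g. the 54 Mbps entry (ieee = 108) becomes bitrate = 540.
 */
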
4854/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004855 * iwl3945_init_geos - Initialize mac80211's geo/channel info based from eeprom
Zhu Yib481de92007-09-25 17:54:57 -07004856 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004857static int iwl3945_init_geos(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07004858{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004859 struct iwl3945_channel_info *ch;
Tomas Winkler8211ef72008-03-02 01:36:04 +02004860 struct ieee80211_supported_band *sband;
Zhu Yib481de92007-09-25 17:54:57 -07004861 struct ieee80211_channel *channels;
4862 struct ieee80211_channel *geo_ch;
4863 struct ieee80211_rate *rates;
4864 int i = 0;
Zhu Yib481de92007-09-25 17:54:57 -07004865
Johannes Berg8318d782008-01-24 19:38:38 +01004866 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
4867 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
Zhu Yib481de92007-09-25 17:54:57 -07004868 IWL_DEBUG_INFO("Geography modes already initialized.\n");
4869 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4870 return 0;
4871 }
4872
Zhu Yib481de92007-09-25 17:54:57 -07004873 channels = kzalloc(sizeof(struct ieee80211_channel) *
4874 priv->channel_count, GFP_KERNEL);
Johannes Berg8318d782008-01-24 19:38:38 +01004875 if (!channels)
Zhu Yib481de92007-09-25 17:54:57 -07004876 return -ENOMEM;
Zhu Yib481de92007-09-25 17:54:57 -07004877
Tomas Winkler8211ef72008-03-02 01:36:04 +02004878 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
Zhu Yib481de92007-09-25 17:54:57 -07004879 GFP_KERNEL);
4880 if (!rates) {
Zhu Yib481de92007-09-25 17:54:57 -07004881 kfree(channels);
4882 return -ENOMEM;
4883 }
4884
Zhu Yib481de92007-09-25 17:54:57 -07004885 /* 5.2GHz channels start after the 2.4GHz channels */
Tomas Winkler8211ef72008-03-02 01:36:04 +02004886 sband = &priv->bands[IEEE80211_BAND_5GHZ];
4887 sband->channels = &channels[ARRAY_SIZE(iwl3945_eeprom_band_1)];
4888 /* just OFDM */
4889 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
4890 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
Zhu Yib481de92007-09-25 17:54:57 -07004891
Tomas Winkler8211ef72008-03-02 01:36:04 +02004892 sband = &priv->bands[IEEE80211_BAND_2GHZ];
4893 sband->channels = channels;
4894 /* OFDM & CCK */
4895 sband->bitrates = rates;
4896 sband->n_bitrates = IWL_RATE_COUNT;
Zhu Yib481de92007-09-25 17:54:57 -07004897
4898 priv->ieee_channels = channels;
4899 priv->ieee_rates = rates;
4900
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004901 iwl3945_init_hw_rates(priv, rates);
Zhu Yib481de92007-09-25 17:54:57 -07004902
Tomas Winkler8211ef72008-03-02 01:36:04 +02004903 for (i = 0; i < priv->channel_count; i++) {
Zhu Yib481de92007-09-25 17:54:57 -07004904 ch = &priv->channel_info[i];
4905
Tomas Winkler8211ef72008-03-02 01:36:04 +02004906		/* FIXME: might be removed if scan is OK */
4907 if (!is_channel_valid(ch))
Zhu Yib481de92007-09-25 17:54:57 -07004908 continue;
Zhu Yib481de92007-09-25 17:54:57 -07004909
4910 if (is_channel_a_band(ch))
Tomas Winkler8211ef72008-03-02 01:36:04 +02004911 sband = &priv->bands[IEEE80211_BAND_5GHZ];
Johannes Berg8318d782008-01-24 19:38:38 +01004912 else
Tomas Winkler8211ef72008-03-02 01:36:04 +02004913 sband = &priv->bands[IEEE80211_BAND_2GHZ];
Zhu Yib481de92007-09-25 17:54:57 -07004914
Tomas Winkler8211ef72008-03-02 01:36:04 +02004915 geo_ch = &sband->channels[sband->n_channels++];
4916
4917 geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel);
Johannes Berg8318d782008-01-24 19:38:38 +01004918 geo_ch->max_power = ch->max_power_avg;
4919 geo_ch->max_antenna_gain = 0xff;
Mohamed Abbas7b723042008-01-31 21:46:40 -08004920 geo_ch->hw_value = ch->channel;
Zhu Yib481de92007-09-25 17:54:57 -07004921
4922 if (is_channel_valid(ch)) {
Johannes Berg8318d782008-01-24 19:38:38 +01004923 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
4924 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
Zhu Yib481de92007-09-25 17:54:57 -07004925
Johannes Berg8318d782008-01-24 19:38:38 +01004926 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
4927 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
Zhu Yib481de92007-09-25 17:54:57 -07004928
4929 if (ch->flags & EEPROM_CHANNEL_RADAR)
Johannes Berg8318d782008-01-24 19:38:38 +01004930 geo_ch->flags |= IEEE80211_CHAN_RADAR;
Zhu Yib481de92007-09-25 17:54:57 -07004931
4932 if (ch->max_power_avg > priv->max_channel_txpower_limit)
4933 priv->max_channel_txpower_limit =
4934 ch->max_power_avg;
Tomas Winkler8211ef72008-03-02 01:36:04 +02004935 } else {
Johannes Berg8318d782008-01-24 19:38:38 +01004936 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
Tomas Winkler8211ef72008-03-02 01:36:04 +02004937 }
4938
4939 /* Save flags for reg domain usage */
4940 geo_ch->orig_flags = geo_ch->flags;
4941
4942 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0%X\n",
4943 ch->channel, geo_ch->center_freq,
4944 is_channel_a_band(ch) ? "5.2" : "2.4",
4945 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
4946 "restricted" : "valid",
4947 geo_ch->flags);
Zhu Yib481de92007-09-25 17:54:57 -07004948 }
4949
Tomas Winkler82b9a122008-03-04 18:09:30 -08004950 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
4951 priv->cfg->sku & IWL_SKU_A) {
Zhu Yib481de92007-09-25 17:54:57 -07004952 printk(KERN_INFO DRV_NAME
4953 ": Incorrectly detected BG card as ABG. Please send "
4954 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
4955 priv->pci_dev->device, priv->pci_dev->subsystem_device);
Tomas Winkler82b9a122008-03-04 18:09:30 -08004956 priv->cfg->sku &= ~IWL_SKU_A;
Zhu Yib481de92007-09-25 17:54:57 -07004957 }
4958
4959 printk(KERN_INFO DRV_NAME
4960 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
Johannes Berg8318d782008-01-24 19:38:38 +01004961 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
4962 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
Zhu Yib481de92007-09-25 17:54:57 -07004963
John W. Linvillee0e0a672008-03-25 15:58:40 -04004964 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
4965 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
4966 &priv->bands[IEEE80211_BAND_2GHZ];
4967 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
4968 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
4969 &priv->bands[IEEE80211_BAND_5GHZ];
Zhu Yib481de92007-09-25 17:54:57 -07004970
Zhu Yib481de92007-09-25 17:54:57 -07004971 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
4972
4973 return 0;
4974}
4975
Reinette Chatre849e0dc2008-01-23 10:15:18 -08004976/*
4977 * iwl3945_free_geos - undo allocations in iwl3945_init_geos
4978 */
4979static void iwl3945_free_geos(struct iwl3945_priv *priv)
4980{
Reinette Chatre849e0dc2008-01-23 10:15:18 -08004981 kfree(priv->ieee_channels);
4982 kfree(priv->ieee_rates);
4983 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
4984}
4985
Zhu Yib481de92007-09-25 17:54:57 -07004986/******************************************************************************
4987 *
4988 * uCode download functions
4989 *
4990 ******************************************************************************/
4991
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004992static void iwl3945_dealloc_ucode_pci(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07004993{
Tomas Winkler98c92212008-01-14 17:46:20 -08004994 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
4995 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
4996 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
4997 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
4998 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
4999 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
Zhu Yib481de92007-09-25 17:54:57 -07005000}
5001
5002/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005003 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
Zhu Yib481de92007-09-25 17:54:57 -07005004 * looking at all data.
5005 */
Tomas Winkler3ac7f142008-07-21 02:40:14 +03005006static int iwl3945_verify_inst_full(struct iwl3945_priv *priv, __le32 *image, u32 len)
Zhu Yib481de92007-09-25 17:54:57 -07005007{
5008 u32 val;
5009 u32 save_len = len;
5010 int rc = 0;
5011 u32 errcnt;
5012
5013 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5014
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005015 rc = iwl3945_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005016 if (rc)
5017 return rc;
5018
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005019 iwl3945_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
Zhu Yib481de92007-09-25 17:54:57 -07005020
5021 errcnt = 0;
5022 for (; len > 0; len -= sizeof(u32), image++) {
5023 /* read data comes through single port, auto-incr addr */
5024 /* NOTE: Use the debugless read so we don't flood kernel log
5025 * if IWL_DL_IO is set */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005026 val = _iwl3945_read_direct32(priv, HBUS_TARG_MEM_RDAT);
Zhu Yib481de92007-09-25 17:54:57 -07005027 if (val != le32_to_cpu(*image)) {
5028 IWL_ERROR("uCode INST section is invalid at "
5029 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5030 save_len - len, val, le32_to_cpu(*image));
5031 rc = -EIO;
5032 errcnt++;
5033 if (errcnt >= 20)
5034 break;
5035 }
5036 }
5037
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005038 iwl3945_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005039
5040 if (!errcnt)
Ian Schrambc434dd2007-10-25 17:15:29 +08005041 IWL_DEBUG_INFO("ucode image in INSTRUCTION memory is good\n");
Zhu Yib481de92007-09-25 17:54:57 -07005042
5043 return rc;
5044}
5045
5046
5047/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005048 * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
Zhu Yib481de92007-09-25 17:54:57 -07005049 * using sample data 100 bytes apart. If these sample points are good,
5050 * it's a pretty good bet that everything between them is good, too.
5051 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005052static int iwl3945_verify_inst_sparse(struct iwl3945_priv *priv, __le32 *image, u32 len)
Zhu Yib481de92007-09-25 17:54:57 -07005053{
5054 u32 val;
5055 int rc = 0;
5056 u32 errcnt = 0;
5057 u32 i;
5058
5059 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5060
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005061 rc = iwl3945_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005062 if (rc)
5063 return rc;
5064
5065 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
5066 /* read data comes through single port, auto-incr addr */
5067 /* NOTE: Use the debugless read so we don't flood kernel log
5068 * if IWL_DL_IO is set */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005069 iwl3945_write_direct32(priv, HBUS_TARG_MEM_RADDR,
Zhu Yib481de92007-09-25 17:54:57 -07005070 i + RTC_INST_LOWER_BOUND);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005071 val = _iwl3945_read_direct32(priv, HBUS_TARG_MEM_RDAT);
Zhu Yib481de92007-09-25 17:54:57 -07005072 if (val != le32_to_cpu(*image)) {
5073#if 0 /* Enable this if you want to see details */
5074 IWL_ERROR("uCode INST section is invalid at "
5075 "offset 0x%x, is 0x%x, s/b 0x%x\n",
5076 i, val, *image);
5077#endif
5078 rc = -EIO;
5079 errcnt++;
5080 if (errcnt >= 3)
5081 break;
5082 }
5083 }
5084
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005085 iwl3945_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005086
5087 return rc;
5088}
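
/* A note on the sampling stride used above (illustrative arithmetic only,
 * the 16 KB figure is a hypothetical image size, not a driver constant):
 * checking one dword every 100 bytes means a 16 KB image is verified at
 * roughly 164 sample points instead of all 4096 dwords, keeping the check
 * cheap while still catching a grossly corrupted load. */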
5089
5090
5091/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005092 * iwl3945_verify_ucode - determine which instruction image is in SRAM,
Zhu Yib481de92007-09-25 17:54:57 -07005093 * and verify its contents
5094 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005095static int iwl3945_verify_ucode(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07005096{
5097 __le32 *image;
5098 u32 len;
5099 int rc = 0;
5100
5101 /* Try bootstrap */
5102 image = (__le32 *)priv->ucode_boot.v_addr;
5103 len = priv->ucode_boot.len;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005104 rc = iwl3945_verify_inst_sparse(priv, image, len);
Zhu Yib481de92007-09-25 17:54:57 -07005105 if (rc == 0) {
5106 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
5107 return 0;
5108 }
5109
5110 /* Try initialize */
5111 image = (__le32 *)priv->ucode_init.v_addr;
5112 len = priv->ucode_init.len;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005113 rc = iwl3945_verify_inst_sparse(priv, image, len);
Zhu Yib481de92007-09-25 17:54:57 -07005114 if (rc == 0) {
5115 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
5116 return 0;
5117 }
5118
5119 /* Try runtime/protocol */
5120 image = (__le32 *)priv->ucode_code.v_addr;
5121 len = priv->ucode_code.len;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005122 rc = iwl3945_verify_inst_sparse(priv, image, len);
Zhu Yib481de92007-09-25 17:54:57 -07005123 if (rc == 0) {
5124 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
5125 return 0;
5126 }
5127
5128 IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
5129
Ben Cahill9fbab512007-11-29 11:09:47 +08005130 /* Since nothing seems to match, show first several data entries in
5131 * instruction SRAM, so maybe visual inspection will give a clue.
5132 * Selection of bootstrap image (vs. other images) is arbitrary. */
Zhu Yib481de92007-09-25 17:54:57 -07005133 image = (__le32 *)priv->ucode_boot.v_addr;
5134 len = priv->ucode_boot.len;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005135 rc = iwl3945_verify_inst_full(priv, image, len);
Zhu Yib481de92007-09-25 17:54:57 -07005136
5137 return rc;
5138}
5139
5140
5141/* check contents of special bootstrap uCode SRAM */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005142static int iwl3945_verify_bsm(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07005143{
5144 __le32 *image = priv->ucode_boot.v_addr;
5145 u32 len = priv->ucode_boot.len;
5146 u32 reg;
5147 u32 val;
5148
5149 IWL_DEBUG_INFO("Begin verify bsm\n");
5150
5151 /* verify BSM SRAM contents */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005152 val = iwl3945_read_prph(priv, BSM_WR_DWCOUNT_REG);
Zhu Yib481de92007-09-25 17:54:57 -07005153 for (reg = BSM_SRAM_LOWER_BOUND;
5154 reg < BSM_SRAM_LOWER_BOUND + len;
Tomas Winkler3ac7f142008-07-21 02:40:14 +03005155 reg += sizeof(u32), image++) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005156 val = iwl3945_read_prph(priv, reg);
Zhu Yib481de92007-09-25 17:54:57 -07005157 if (val != le32_to_cpu(*image)) {
5158 IWL_ERROR("BSM uCode verification failed at "
5159 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
5160 BSM_SRAM_LOWER_BOUND,
5161 reg - BSM_SRAM_LOWER_BOUND, len,
5162 val, le32_to_cpu(*image));
5163 return -EIO;
5164 }
5165 }
5166
5167 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
5168
5169 return 0;
5170}
5171
5172/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005173 * iwl3945_load_bsm - Load bootstrap instructions
Zhu Yib481de92007-09-25 17:54:57 -07005174 *
5175 * BSM operation:
5176 *
5177 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
5178 * in special SRAM that does not power down during RFKILL. When powering back
5179 * up after power-saving sleeps (or during initial uCode load), the BSM loads
5180 * the bootstrap program into the on-board processor, and starts it.
5181 *
5182 * The bootstrap program loads (via DMA) instructions and data for a new
5183 * program from host DRAM locations indicated by the host driver in the
5184 * BSM_DRAM_* registers. Once the new program is loaded, it starts
5185 * automatically.
5186 *
5187 * When initializing the NIC, the host driver points the BSM to the
5188 * "initialize" uCode image. This uCode sets up some internal data, then
5189 * notifies host via "initialize alive" that it is complete.
5190 *
5191 * The host then replaces the BSM_DRAM_* pointer values to point to the
5192 * normal runtime uCode instructions and a backup uCode data cache buffer
5193 * (filled initially with starting data values for the on-board processor),
5194 * then triggers the "initialize" uCode to load and launch the runtime uCode,
5195 * which begins normal operation.
5196 *
5197 * When doing a power-save shutdown, runtime uCode saves data SRAM into
5198 * the backup data cache in DRAM before SRAM is powered down.
5199 *
5200 * When powering back up, the BSM loads the bootstrap program. This reloads
5201 * the runtime uCode instructions and the backup data cache into SRAM,
5202 * and re-launches the runtime uCode from where it left off.
5203 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005204static int iwl3945_load_bsm(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07005205{
5206 __le32 *image = priv->ucode_boot.v_addr;
5207 u32 len = priv->ucode_boot.len;
5208 dma_addr_t pinst;
5209 dma_addr_t pdata;
5210 u32 inst_len;
5211 u32 data_len;
5212 int rc;
5213 int i;
5214 u32 done;
5215 u32 reg_offset;
5216
5217 IWL_DEBUG_INFO("Begin load bsm\n");
5218
5219 /* make sure bootstrap program is no larger than BSM's SRAM size */
5220 if (len > IWL_MAX_BSM_SIZE)
5221 return -EINVAL;
5222
5223 /* Tell bootstrap uCode where to find the "Initialize" uCode
Ben Cahill9fbab512007-11-29 11:09:47 +08005224 * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005225	 * NOTE:  iwl3945_init_alive_start() will replace these values,
Zhu Yib481de92007-09-25 17:54:57 -07005226 * after the "initialize" uCode has run, to point to
5227 * runtime/protocol instructions and backup data cache. */
5228 pinst = priv->ucode_init.p_addr;
5229 pdata = priv->ucode_init_data.p_addr;
5230 inst_len = priv->ucode_init.len;
5231 data_len = priv->ucode_init_data.len;
5232
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005233 rc = iwl3945_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005234 if (rc)
5235 return rc;
5236
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005237 iwl3945_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
5238 iwl3945_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5239 iwl3945_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
5240 iwl3945_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
Zhu Yib481de92007-09-25 17:54:57 -07005241
5242 /* Fill BSM memory with bootstrap instructions */
5243 for (reg_offset = BSM_SRAM_LOWER_BOUND;
5244 reg_offset < BSM_SRAM_LOWER_BOUND + len;
5245 reg_offset += sizeof(u32), image++)
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005246 _iwl3945_write_prph(priv, reg_offset,
Zhu Yib481de92007-09-25 17:54:57 -07005247 le32_to_cpu(*image));
5248
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005249 rc = iwl3945_verify_bsm(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005250 if (rc) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005251 iwl3945_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005252 return rc;
5253 }
5254
5255 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005256 iwl3945_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
5257 iwl3945_write_prph(priv, BSM_WR_MEM_DST_REG,
Zhu Yib481de92007-09-25 17:54:57 -07005258 RTC_INST_LOWER_BOUND);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005259 iwl3945_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
Zhu Yib481de92007-09-25 17:54:57 -07005260
5261 /* Load bootstrap code into instruction SRAM now,
5262 * to prepare to load "initialize" uCode */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005263 iwl3945_write_prph(priv, BSM_WR_CTRL_REG,
Zhu Yib481de92007-09-25 17:54:57 -07005264 BSM_WR_CTRL_REG_BIT_START);
5265
5266 /* Wait for load of bootstrap uCode to finish */
5267 for (i = 0; i < 100; i++) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005268 done = iwl3945_read_prph(priv, BSM_WR_CTRL_REG);
Zhu Yib481de92007-09-25 17:54:57 -07005269 if (!(done & BSM_WR_CTRL_REG_BIT_START))
5270 break;
5271 udelay(10);
5272 }
5273 if (i < 100)
5274 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
5275 else {
5276 IWL_ERROR("BSM write did not complete!\n");
5277 return -EIO;
5278 }
5279
5280 /* Enable future boot loads whenever power management unit triggers it
5281 * (e.g. when powering back up after power-save shutdown) */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005282 iwl3945_write_prph(priv, BSM_WR_CTRL_REG,
Zhu Yib481de92007-09-25 17:54:57 -07005283 BSM_WR_CTRL_REG_BIT_START_EN);
5284
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005285 iwl3945_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005286
5287 return 0;
5288}
5289
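/* Condensed, illustrative sketch (not compiled) of the BSM load sequence
 * implemented by iwl3945_load_bsm() above: point the BSM at the "initialize"
 * image in host DRAM, copy the bootstrap program into BSM SRAM, then kick off
 * the copy into instruction SRAM and re-arm it for future power-ups.  Error
 * handling, NIC access grab/release, the BSM SRAM fill loop and the
 * completion poll are all omitted here; see the real function for those. */
#if 0
static void iwl3945_load_bsm_sketch(struct iwl3945_priv *priv)
{
	/* 1) Location/size of the "initialize" uCode in host DRAM */
	iwl3945_write_prph(priv, BSM_DRAM_INST_PTR_REG, priv->ucode_init.p_addr);
	iwl3945_write_prph(priv, BSM_DRAM_DATA_PTR_REG, priv->ucode_init_data.p_addr);
	iwl3945_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, priv->ucode_init.len);
	iwl3945_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, priv->ucode_init_data.len);

	/* 2) (fill BSM SRAM with the bootstrap image here, dword by dword) */

	/* 3) Source, destination and size of the copy into instruction SRAM */
	iwl3945_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl3945_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
	iwl3945_write_prph(priv, BSM_WR_DWCOUNT_REG, priv->ucode_boot.len / sizeof(u32));

	/* 4) Start the copy now, then enable it for future power-ups */
	iwl3945_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
	iwl3945_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
}
#endif
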
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005290static void iwl3945_nic_start(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07005291{
5292 /* Remove all resets to allow NIC to operate */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005293 iwl3945_write32(priv, CSR_RESET, 0);
Zhu Yib481de92007-09-25 17:54:57 -07005294}
5295
5296/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005297 * iwl3945_read_ucode - Read uCode images from disk file.
Zhu Yib481de92007-09-25 17:54:57 -07005298 *
5299 * Copy into buffers for card to fetch via bus-mastering
5300 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005301static int iwl3945_read_ucode(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07005302{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005303 struct iwl3945_ucode *ucode;
Reinette Chatrea0987a82008-12-02 12:14:06 -08005304 int ret = -EINVAL, index;
Zhu Yib481de92007-09-25 17:54:57 -07005305 const struct firmware *ucode_raw;
5306 /* firmware file name contains uCode/driver compatibility version */
Reinette Chatrea0987a82008-12-02 12:14:06 -08005307 const char *name_pre = priv->cfg->fw_name_pre;
5308 const unsigned int api_max = priv->cfg->ucode_api_max;
5309 const unsigned int api_min = priv->cfg->ucode_api_min;
5310 char buf[25];
Zhu Yib481de92007-09-25 17:54:57 -07005311 u8 *src;
5312 size_t len;
Reinette Chatrea0987a82008-12-02 12:14:06 -08005313 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
Zhu Yib481de92007-09-25 17:54:57 -07005314
5315 /* Ask kernel firmware_class module to get the boot firmware off disk.
5316 * request_firmware() is synchronous, file is in memory on return. */
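	/* Illustrative example (values assumed, not taken from this file): with
	 * a fw_name_pre of "iwlwifi-3945-", api_max = 2 and api_min = 1, the
	 * loop below first requests "iwlwifi-3945-2.ucode" and, if that file
	 * is absent (-ENOENT), falls back to "iwlwifi-3945-1.ucode". */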
Reinette Chatrea0987a82008-12-02 12:14:06 -08005317 for (index = api_max; index >= api_min; index--) {
5318 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
5319 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
5320 if (ret < 0) {
5321 IWL_ERROR("%s firmware file req failed: Reason %d\n",
5322 buf, ret);
5323 if (ret == -ENOENT)
5324 continue;
5325 else
5326 goto error;
5327 } else {
5328 if (index < api_max)
5329 IWL_ERROR("Loaded firmware %s, which is deprecated. Please use API v%u instead.\n",
5330 buf, api_max);
5331 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
5332 buf, ucode_raw->size);
5333 break;
5334 }
Zhu Yib481de92007-09-25 17:54:57 -07005335 }
5336
Reinette Chatrea0987a82008-12-02 12:14:06 -08005337 if (ret < 0)
5338 goto error;
Zhu Yib481de92007-09-25 17:54:57 -07005339
5340 /* Make sure that we got at least our header! */
5341 if (ucode_raw->size < sizeof(*ucode)) {
5342 IWL_ERROR("File size way too small!\n");
Tomas Winkler90e759d2007-11-29 11:09:41 +08005343 ret = -EINVAL;
Zhu Yib481de92007-09-25 17:54:57 -07005344 goto err_release;
5345 }
5346
5347 /* Data from ucode file: header followed by uCode images */
5348 ucode = (void *)ucode_raw->data;
5349
Chatre, Reinettec02b3ac2008-12-02 12:14:05 -08005350 priv->ucode_ver = le32_to_cpu(ucode->ver);
Reinette Chatrea0987a82008-12-02 12:14:06 -08005351 api_ver = IWL_UCODE_API(priv->ucode_ver);
Zhu Yib481de92007-09-25 17:54:57 -07005352 inst_size = le32_to_cpu(ucode->inst_size);
5353 data_size = le32_to_cpu(ucode->data_size);
5354 init_size = le32_to_cpu(ucode->init_size);
5355 init_data_size = le32_to_cpu(ucode->init_data_size);
5356 boot_size = le32_to_cpu(ucode->boot_size);
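
	/* Sketch of the on-disk image layout implied by these header fields
	 * and by the copy offsets used further below:
	 *
	 *   [header | runtime inst (inst_size) | runtime data (data_size) |
	 *    init inst (init_size) | init data (init_data_size) |
	 *    boot inst (boot_size)]
	 */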
5357
Reinette Chatrea0987a82008-12-02 12:14:06 -08005358 /* api_ver should match the api version forming part of the
5359 * firmware filename ... but we don't check for that and only rely
5360	 * on the API version read from the firmware header from here on forward */
5361
5362 if (api_ver < api_min || api_ver > api_max) {
5363 IWL_ERROR("Driver unable to support your firmware API. "
5364 "Driver supports v%u, firmware is v%u.\n",
5365 api_max, api_ver);
5366 priv->ucode_ver = 0;
5367 ret = -EINVAL;
5368 goto err_release;
5369 }
5370 if (api_ver != api_max)
5371 IWL_ERROR("Firmware has old API version. Expected %u, "
5372 "got %u. New firmware can be obtained "
5373 "from http://www.intellinuxwireless.org.\n",
5374 api_max, api_ver);
5375
5376 printk(KERN_INFO DRV_NAME " loaded firmware version %u.%u.%u.%u\n",
Chatre, Reinettec02b3ac2008-12-02 12:14:05 -08005377 IWL_UCODE_MAJOR(priv->ucode_ver),
5378 IWL_UCODE_MINOR(priv->ucode_ver),
5379 IWL_UCODE_API(priv->ucode_ver),
5380 IWL_UCODE_SERIAL(priv->ucode_ver));
Reinette Chatrea0987a82008-12-02 12:14:06 -08005381 IWL_DEBUG_INFO("f/w package hdr ucode version raw = 0x%x\n",
5382 priv->ucode_ver);
Ian Schrambc434dd2007-10-25 17:15:29 +08005383 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
5384 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", data_size);
5385 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", init_size);
5386 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", init_data_size);
5387 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", boot_size);
Zhu Yib481de92007-09-25 17:54:57 -07005388
Reinette Chatrea0987a82008-12-02 12:14:06 -08005389
Zhu Yib481de92007-09-25 17:54:57 -07005390 /* Verify size of file vs. image size info in file's header */
5391 if (ucode_raw->size < sizeof(*ucode) +
5392 inst_size + data_size + init_size +
5393 init_data_size + boot_size) {
5394
5395 IWL_DEBUG_INFO("uCode file size %d too small\n",
5396 (int)ucode_raw->size);
Tomas Winkler90e759d2007-11-29 11:09:41 +08005397 ret = -EINVAL;
Zhu Yib481de92007-09-25 17:54:57 -07005398 goto err_release;
5399 }
5400
5401 /* Verify that uCode images will fit in card's SRAM */
5402 if (inst_size > IWL_MAX_INST_SIZE) {
Tomas Winkler90e759d2007-11-29 11:09:41 +08005403		IWL_DEBUG_INFO("uCode instr len %d too large to fit in card\n",
5404 inst_size);
5405 ret = -EINVAL;
Zhu Yib481de92007-09-25 17:54:57 -07005406 goto err_release;
5407 }
5408
5409 if (data_size > IWL_MAX_DATA_SIZE) {
Tomas Winkler90e759d2007-11-29 11:09:41 +08005410		IWL_DEBUG_INFO("uCode data len %d too large to fit in card\n",
5411 data_size);
5412 ret = -EINVAL;
Zhu Yib481de92007-09-25 17:54:57 -07005413 goto err_release;
5414 }
5415 if (init_size > IWL_MAX_INST_SIZE) {
Tomas Winkler90e759d2007-11-29 11:09:41 +08005416		IWL_DEBUG_INFO("uCode init instr len %d too large to fit in card\n",
5417 init_size);
5418 ret = -EINVAL;
Zhu Yib481de92007-09-25 17:54:57 -07005419 goto err_release;
5420 }
5421 if (init_data_size > IWL_MAX_DATA_SIZE) {
Tomas Winkler90e759d2007-11-29 11:09:41 +08005422		IWL_DEBUG_INFO("uCode init data len %d too large to fit in card\n",
5423 init_data_size);
5424 ret = -EINVAL;
Zhu Yib481de92007-09-25 17:54:57 -07005425 goto err_release;
5426 }
5427 if (boot_size > IWL_MAX_BSM_SIZE) {
Tomas Winkler90e759d2007-11-29 11:09:41 +08005428		IWL_DEBUG_INFO("uCode boot instr len %d too large to fit in card\n",
5429 boot_size);
5430 ret = -EINVAL;
Zhu Yib481de92007-09-25 17:54:57 -07005431 goto err_release;
5432 }
5433
5434 /* Allocate ucode buffers for card's bus-master loading ... */
5435
5436 /* Runtime instructions and 2 copies of data:
5437 * 1) unmodified from disk
5438 * 2) backup cache for save/restore during power-downs */
5439 priv->ucode_code.len = inst_size;
Tomas Winkler98c92212008-01-14 17:46:20 -08005440 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
Zhu Yib481de92007-09-25 17:54:57 -07005441
5442 priv->ucode_data.len = data_size;
Tomas Winkler98c92212008-01-14 17:46:20 -08005443 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
Zhu Yib481de92007-09-25 17:54:57 -07005444
5445 priv->ucode_data_backup.len = data_size;
Tomas Winkler98c92212008-01-14 17:46:20 -08005446 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
Zhu Yib481de92007-09-25 17:54:57 -07005447
5448 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
Tomas Winkler90e759d2007-11-29 11:09:41 +08005449 !priv->ucode_data_backup.v_addr)
Zhu Yib481de92007-09-25 17:54:57 -07005450 goto err_pci_alloc;
5451
Tomas Winkler90e759d2007-11-29 11:09:41 +08005452 /* Initialization instructions and data */
5453 if (init_size && init_data_size) {
5454 priv->ucode_init.len = init_size;
Tomas Winkler98c92212008-01-14 17:46:20 -08005455 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
Tomas Winkler90e759d2007-11-29 11:09:41 +08005456
5457 priv->ucode_init_data.len = init_data_size;
Tomas Winkler98c92212008-01-14 17:46:20 -08005458 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
Tomas Winkler90e759d2007-11-29 11:09:41 +08005459
5460 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
5461 goto err_pci_alloc;
5462 }
5463
5464 /* Bootstrap (instructions only, no data) */
5465 if (boot_size) {
5466 priv->ucode_boot.len = boot_size;
Tomas Winkler98c92212008-01-14 17:46:20 -08005467 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
Tomas Winkler90e759d2007-11-29 11:09:41 +08005468
5469 if (!priv->ucode_boot.v_addr)
5470 goto err_pci_alloc;
5471 }
5472
Zhu Yib481de92007-09-25 17:54:57 -07005473 /* Copy images into buffers for card's bus-master reads ... */
5474
5475 /* Runtime instructions (first block of data in file) */
5476 src = &ucode->data[0];
5477 len = priv->ucode_code.len;
Tomas Winkler90e759d2007-11-29 11:09:41 +08005478 IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %Zd\n", len);
Zhu Yib481de92007-09-25 17:54:57 -07005479 memcpy(priv->ucode_code.v_addr, src, len);
5480 IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
5481 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
5482
5483 /* Runtime data (2nd block)
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005484 * NOTE: Copy into backup buffer will be done in iwl3945_up() */
Zhu Yib481de92007-09-25 17:54:57 -07005485 src = &ucode->data[inst_size];
5486 len = priv->ucode_data.len;
Tomas Winkler90e759d2007-11-29 11:09:41 +08005487 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
Zhu Yib481de92007-09-25 17:54:57 -07005488 memcpy(priv->ucode_data.v_addr, src, len);
5489 memcpy(priv->ucode_data_backup.v_addr, src, len);
5490
5491 /* Initialization instructions (3rd block) */
5492 if (init_size) {
5493 src = &ucode->data[inst_size + data_size];
5494 len = priv->ucode_init.len;
Tomas Winkler90e759d2007-11-29 11:09:41 +08005495 IWL_DEBUG_INFO("Copying (but not loading) init instr len %Zd\n",
5496 len);
Zhu Yib481de92007-09-25 17:54:57 -07005497 memcpy(priv->ucode_init.v_addr, src, len);
5498 }
5499
5500 /* Initialization data (4th block) */
5501 if (init_data_size) {
5502 src = &ucode->data[inst_size + data_size + init_size];
5503 len = priv->ucode_init_data.len;
5504 IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n",
5505 (int)len);
5506 memcpy(priv->ucode_init_data.v_addr, src, len);
5507 }
5508
5509 /* Bootstrap instructions (5th block) */
5510 src = &ucode->data[inst_size + data_size + init_size + init_data_size];
5511 len = priv->ucode_boot.len;
5512 IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n",
5513 (int)len);
5514 memcpy(priv->ucode_boot.v_addr, src, len);
5515
5516 /* We have our copies now, allow OS release its copies */
5517 release_firmware(ucode_raw);
5518 return 0;
5519
5520 err_pci_alloc:
5521 IWL_ERROR("failed to allocate pci memory\n");
Tomas Winkler90e759d2007-11-29 11:09:41 +08005522 ret = -ENOMEM;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005523 iwl3945_dealloc_ucode_pci(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005524
5525 err_release:
5526 release_firmware(ucode_raw);
5527
5528 error:
Tomas Winkler90e759d2007-11-29 11:09:41 +08005529 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07005530}
5531
5532
5533/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005534 * iwl3945_set_ucode_ptrs - Set uCode address location
Zhu Yib481de92007-09-25 17:54:57 -07005535 *
5536 * Tell initialization uCode where to find runtime uCode.
5537 *
5538 * BSM registers initially contain pointers to initialization uCode.
5539 * We need to replace them to load runtime uCode inst and data,
5540 * and to save runtime data when powering down.
5541 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005542static int iwl3945_set_ucode_ptrs(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07005543{
5544 dma_addr_t pinst;
5545 dma_addr_t pdata;
5546 int rc = 0;
5547 unsigned long flags;
5548
5549 /* bits 31:0 for 3945 */
5550 pinst = priv->ucode_code.p_addr;
5551 pdata = priv->ucode_data_backup.p_addr;
5552
5553 spin_lock_irqsave(&priv->lock, flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005554 rc = iwl3945_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005555 if (rc) {
5556 spin_unlock_irqrestore(&priv->lock, flags);
5557 return rc;
5558 }
5559
5560 /* Tell bootstrap uCode where to find image to load */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005561 iwl3945_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
5562 iwl3945_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5563 iwl3945_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
Zhu Yib481de92007-09-25 17:54:57 -07005564 priv->ucode_data.len);
5565
Tomas Winklera96a27f2008-10-23 23:48:56 -07005566	/* Inst byte count must be set up last; bit 31 signals uCode
Zhu Yib481de92007-09-25 17:54:57 -07005567 * that all new ptr/size info is in place */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005568 iwl3945_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
Zhu Yib481de92007-09-25 17:54:57 -07005569 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
5570
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005571 iwl3945_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005572
5573 spin_unlock_irqrestore(&priv->lock, flags);
5574
5575 IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
5576
5577 return rc;
5578}
5579
5580/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005581 * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
Zhu Yib481de92007-09-25 17:54:57 -07005582 *
5583 * Called after REPLY_ALIVE notification received from "initialize" uCode.
5584 *
Zhu Yib481de92007-09-25 17:54:57 -07005585 * Tell "initialize" uCode to go ahead and load the runtime uCode.
Ben Cahill9fbab512007-11-29 11:09:47 +08005586 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005587static void iwl3945_init_alive_start(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07005588{
5589 /* Check alive response for "valid" sign from uCode */
5590 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
5591 /* We had an error bringing up the hardware, so take it
5592 * all the way back down so we can try again */
5593 IWL_DEBUG_INFO("Initialize Alive failed.\n");
5594 goto restart;
5595 }
5596
5597 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
5598 * This is a paranoid check, because we would not have gotten the
5599 * "initialize" alive if code weren't properly loaded. */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005600 if (iwl3945_verify_ucode(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07005601 /* Runtime instruction load was bad;
5602 * take it all the way back down so we can try again */
5603 IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
5604 goto restart;
5605 }
5606
5607 /* Send pointers to protocol/runtime uCode image ... init code will
5608 * load and launch runtime uCode, which will send us another "Alive"
5609 * notification. */
5610 IWL_DEBUG_INFO("Initialization Alive received.\n");
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005611 if (iwl3945_set_ucode_ptrs(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07005612 /* Runtime instruction load won't happen;
5613 * take it all the way back down so we can try again */
5614 IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
5615 goto restart;
5616 }
5617 return;
5618
5619 restart:
5620 queue_work(priv->workqueue, &priv->restart);
5621}
5622
5623
Mohamed Abbas9bdf5ec2008-11-07 09:58:35 -08005624/* temporary */
5625static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw,
5626 struct sk_buff *skb);
5627
Zhu Yib481de92007-09-25 17:54:57 -07005628/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005629 * iwl3945_alive_start - called after REPLY_ALIVE notification received
Zhu Yib481de92007-09-25 17:54:57 -07005630 * from protocol/runtime uCode (initialization uCode's
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005631 * Alive gets handled by iwl3945_init_alive_start()).
Zhu Yib481de92007-09-25 17:54:57 -07005632 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005633static void iwl3945_alive_start(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07005634{
5635 int rc = 0;
5636 int thermal_spin = 0;
5637 u32 rfkill;
5638
5639 IWL_DEBUG_INFO("Runtime Alive received.\n");
5640
5641 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
5642 /* We had an error bringing up the hardware, so take it
5643 * all the way back down so we can try again */
5644 IWL_DEBUG_INFO("Alive failed.\n");
5645 goto restart;
5646 }
5647
5648 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
5649 * This is a paranoid check, because we would not have gotten the
5650 * "runtime" alive if code weren't properly loaded. */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005651 if (iwl3945_verify_ucode(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07005652 /* Runtime instruction load was bad;
5653 * take it all the way back down so we can try again */
5654 IWL_DEBUG_INFO("Bad runtime uCode load.\n");
5655 goto restart;
5656 }
5657
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005658 iwl3945_clear_stations_table(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005659
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005660 rc = iwl3945_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005661 if (rc) {
Tomas Winklera96a27f2008-10-23 23:48:56 -07005662		IWL_WARNING("Cannot read RFKILL status from adapter\n");
Zhu Yib481de92007-09-25 17:54:57 -07005663 return;
5664 }
5665
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005666 rfkill = iwl3945_read_prph(priv, APMG_RFKILL_REG);
Zhu Yib481de92007-09-25 17:54:57 -07005667 IWL_DEBUG_INFO("RFKILL status: 0x%x\n", rfkill);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005668 iwl3945_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005669
5670 if (rfkill & 0x1) {
5671 clear_bit(STATUS_RF_KILL_HW, &priv->status);
Tomas Winklera96a27f2008-10-23 23:48:56 -07005672 /* if RFKILL is not on, then wait for thermal
Zhu Yib481de92007-09-25 17:54:57 -07005673 * sensor in adapter to kick in */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005674 while (iwl3945_hw_get_temperature(priv) == 0) {
Zhu Yib481de92007-09-25 17:54:57 -07005675 thermal_spin++;
5676 udelay(10);
5677 }
5678
5679 if (thermal_spin)
5680 IWL_DEBUG_INFO("Thermal calibration took %dus\n",
5681 thermal_spin * 10);
5682 } else
5683 set_bit(STATUS_RF_KILL_HW, &priv->status);
5684
Ben Cahill9fbab512007-11-29 11:09:47 +08005685 /* After the ALIVE response, we can send commands to 3945 uCode */
Zhu Yib481de92007-09-25 17:54:57 -07005686 set_bit(STATUS_ALIVE, &priv->status);
5687
5688 /* Clear out the uCode error bit if it is set */
5689 clear_bit(STATUS_FW_ERROR, &priv->status);
5690
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005691 if (iwl3945_is_rfkill(priv))
Zhu Yib481de92007-09-25 17:54:57 -07005692 return;
5693
Johannes Berg36d68252008-05-15 12:55:26 +02005694 ieee80211_wake_queues(priv->hw);
Zhu Yib481de92007-09-25 17:54:57 -07005695
5696 priv->active_rate = priv->rates_mask;
5697 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
5698
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005699 iwl3945_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
Zhu Yib481de92007-09-25 17:54:57 -07005700
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005701 if (iwl3945_is_associated(priv)) {
5702 struct iwl3945_rxon_cmd *active_rxon =
5703 (struct iwl3945_rxon_cmd *)(&priv->active_rxon);
Zhu Yib481de92007-09-25 17:54:57 -07005704
5705 memcpy(&priv->staging_rxon, &priv->active_rxon,
5706 sizeof(priv->staging_rxon));
5707 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5708 } else {
5709 /* Initialize our rx_config data */
Zhu, Yi60294de2008-10-29 14:05:45 -07005710 iwl3945_connection_init_rx_config(priv, priv->iw_mode);
Zhu Yib481de92007-09-25 17:54:57 -07005711 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
5712 }
5713
Ben Cahill9fbab512007-11-29 11:09:47 +08005714 /* Configure Bluetooth device coexistence support */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005715 iwl3945_send_bt_config(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005716
5717 /* Configure the adapter for unassociated operation */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005718 iwl3945_commit_rxon(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005719
Zhu Yib481de92007-09-25 17:54:57 -07005720 iwl3945_reg_txpower_periodic(priv);
5721
Reinette Chatrefe00b5a2008-04-03 16:05:23 -07005722 iwl3945_led_register(priv);
5723
Zhu Yib481de92007-09-25 17:54:57 -07005724 IWL_DEBUG_INFO("ALIVE processing complete.\n");
Rick Farringtona9f46782008-03-18 14:57:49 -07005725 set_bit(STATUS_READY, &priv->status);
Zhu Yi5a669262008-01-14 17:46:18 -08005726 wake_up_interruptible(&priv->wait_command_queue);
Zhu Yib481de92007-09-25 17:54:57 -07005727
5728 if (priv->error_recovering)
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005729 iwl3945_error_recovery(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005730
Mohamed Abbas9bdf5ec2008-11-07 09:58:35 -08005731 /* reassociate for ADHOC mode */
5732 if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
5733 struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
5734 priv->vif);
5735 if (beacon)
5736 iwl3945_mac_beacon_update(priv->hw, beacon);
5737 }
5738
Zhu Yib481de92007-09-25 17:54:57 -07005739 return;
5740
5741 restart:
5742 queue_work(priv->workqueue, &priv->restart);
5743}
5744
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005745static void iwl3945_cancel_deferred_work(struct iwl3945_priv *priv);
Zhu Yib481de92007-09-25 17:54:57 -07005746
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005747static void __iwl3945_down(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07005748{
5749 unsigned long flags;
5750 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
5751 struct ieee80211_conf *conf = NULL;
5752
5753 IWL_DEBUG_INFO(DRV_NAME " is going down\n");
5754
5755 conf = ieee80211_get_hw_conf(priv->hw);
5756
5757 if (!exit_pending)
5758 set_bit(STATUS_EXIT_PENDING, &priv->status);
5759
Mohamed Abbasab53d8a2008-03-25 16:33:36 -07005760 iwl3945_led_unregister(priv);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005761 iwl3945_clear_stations_table(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005762
5763 /* Unblock any waiting calls */
5764 wake_up_interruptible_all(&priv->wait_command_queue);
5765
Zhu Yib481de92007-09-25 17:54:57 -07005766 /* Wipe out the EXIT_PENDING status bit if we are not actually
5767 * exiting the module */
5768 if (!exit_pending)
5769 clear_bit(STATUS_EXIT_PENDING, &priv->status);
5770
5771 /* stop and reset the on-board processor */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005772 iwl3945_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
Zhu Yib481de92007-09-25 17:54:57 -07005773
5774 /* tell the device to stop sending interrupts */
Mohamed Abbas0359fac2008-03-28 16:21:08 -07005775 spin_lock_irqsave(&priv->lock, flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005776 iwl3945_disable_interrupts(priv);
Mohamed Abbas0359fac2008-03-28 16:21:08 -07005777 spin_unlock_irqrestore(&priv->lock, flags);
5778 iwl_synchronize_irq(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005779
5780 if (priv->mac80211_registered)
5781 ieee80211_stop_queues(priv->hw);
5782
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005783 /* If we have not previously called iwl3945_init() then
Zhu Yib481de92007-09-25 17:54:57 -07005784 * clear all bits but the RF Kill and SUSPEND bits and return */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005785 if (!iwl3945_is_init(priv)) {
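		/* Each test_bit() below evaluates to 0 or 1; shifting that back
		 * by its bit index and OR-ing the results rebuilds priv->status
		 * with only the listed bits preserved and everything else
		 * cleared.  The same idiom is used again further down. */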
Zhu Yib481de92007-09-25 17:54:57 -07005786 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
5787 STATUS_RF_KILL_HW |
5788 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
5789 STATUS_RF_KILL_SW |
Reinette Chatre97888642008-02-06 11:20:38 -08005790 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
5791 STATUS_GEO_CONFIGURED |
Zhu Yib481de92007-09-25 17:54:57 -07005792 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08005793 STATUS_IN_SUSPEND |
5794 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
5795 STATUS_EXIT_PENDING;
Zhu Yib481de92007-09-25 17:54:57 -07005796 goto exit;
5797 }
5798
5799 /* ...otherwise clear out all the status bits but the RF Kill and
5800 * SUSPEND bits and continue taking the NIC down. */
5801 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
5802 STATUS_RF_KILL_HW |
5803 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
5804 STATUS_RF_KILL_SW |
Reinette Chatre97888642008-02-06 11:20:38 -08005805 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
5806 STATUS_GEO_CONFIGURED |
Zhu Yib481de92007-09-25 17:54:57 -07005807 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
5808 STATUS_IN_SUSPEND |
5809 test_bit(STATUS_FW_ERROR, &priv->status) <<
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08005810 STATUS_FW_ERROR |
5811 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
5812 STATUS_EXIT_PENDING;
Zhu Yib481de92007-09-25 17:54:57 -07005813
5814 spin_lock_irqsave(&priv->lock, flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005815 iwl3945_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
Zhu Yib481de92007-09-25 17:54:57 -07005816 spin_unlock_irqrestore(&priv->lock, flags);
5817
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005818 iwl3945_hw_txq_ctx_stop(priv);
5819 iwl3945_hw_rxq_stop(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005820
5821 spin_lock_irqsave(&priv->lock, flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005822 if (!iwl3945_grab_nic_access(priv)) {
5823 iwl3945_write_prph(priv, APMG_CLK_DIS_REG,
Zhu Yib481de92007-09-25 17:54:57 -07005824 APMG_CLK_VAL_DMA_CLK_RQT);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005825 iwl3945_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005826 }
5827 spin_unlock_irqrestore(&priv->lock, flags);
5828
5829 udelay(5);
5830
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005831 iwl3945_hw_nic_stop_master(priv);
5832 iwl3945_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
5833 iwl3945_hw_nic_reset(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005834
5835 exit:
Tomas Winkler3d24a9f2008-12-19 10:37:07 +08005836 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
Zhu Yib481de92007-09-25 17:54:57 -07005837
5838 if (priv->ibss_beacon)
5839 dev_kfree_skb(priv->ibss_beacon);
5840 priv->ibss_beacon = NULL;
5841
5842 /* clear out any free frames */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005843 iwl3945_clear_free_frames(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005844}
5845
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005846static void iwl3945_down(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07005847{
5848 mutex_lock(&priv->mutex);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005849 __iwl3945_down(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005850 mutex_unlock(&priv->mutex);
Zhu Yib24d22b2007-12-19 13:59:52 +08005851
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005852 iwl3945_cancel_deferred_work(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005853}
5854
5855#define MAX_HW_RESTARTS 5
5856
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005857static int __iwl3945_up(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07005858{
5859 int rc, i;
5860
5861 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
5862 IWL_WARNING("Exit pending; will not bring the NIC up\n");
5863 return -EIO;
5864 }
5865
5866 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
5867 IWL_WARNING("Radio disabled by SW RF kill (module "
5868 "parameter)\n");
Zhu Yie655b9f2008-01-24 02:19:38 -08005869 return -ENODEV;
5870 }
5871
Reinette Chatree903fbd2008-01-30 22:05:15 -08005872 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
Tomas Winklera96a27f2008-10-23 23:48:56 -07005873 IWL_ERROR("ucode not available for device bring up\n");
Reinette Chatree903fbd2008-01-30 22:05:15 -08005874 return -EIO;
5875 }
5876
Zhu Yie655b9f2008-01-24 02:19:38 -08005877 /* If platform's RF_KILL switch is NOT set to KILL */
5878 if (iwl3945_read32(priv, CSR_GP_CNTRL) &
5879 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
5880 clear_bit(STATUS_RF_KILL_HW, &priv->status);
5881 else {
5882 set_bit(STATUS_RF_KILL_HW, &priv->status);
5883 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) {
5884 IWL_WARNING("Radio disabled by HW RF Kill switch\n");
5885 return -ENODEV;
5886 }
Zhu Yib481de92007-09-25 17:54:57 -07005887 }
Adel Gadllah80fcc9e2008-07-01 17:49:50 +02005888
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005889 iwl3945_write32(priv, CSR_INT, 0xFFFFFFFF);
Zhu Yib481de92007-09-25 17:54:57 -07005890
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005891 rc = iwl3945_hw_nic_init(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005892 if (rc) {
5893		IWL_ERROR("Unable to init nic\n");
5894 return rc;
5895 }
5896
5897 /* make sure rfkill handshake bits are cleared */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005898 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5899 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_CLR,
Zhu Yib481de92007-09-25 17:54:57 -07005900 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
5901
5902 /* clear (again), then enable host interrupts */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005903 iwl3945_write32(priv, CSR_INT, 0xFFFFFFFF);
5904 iwl3945_enable_interrupts(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005905
5906 /* really make sure rfkill handshake bits are cleared */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005907 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
5908 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
Zhu Yib481de92007-09-25 17:54:57 -07005909
5910 /* Copy original ucode data image from disk into backup cache.
5911 * This will be used to initialize the on-board processor's
5912 * data SRAM for a clean start when the runtime program first loads. */
5913 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
Zhu Yi5a669262008-01-14 17:46:18 -08005914 priv->ucode_data.len);
Zhu Yib481de92007-09-25 17:54:57 -07005915
Zhu Yie655b9f2008-01-24 02:19:38 -08005916 /* We return success when we resume from suspend and rf_kill is on. */
5917 if (test_bit(STATUS_RF_KILL_HW, &priv->status))
5918 return 0;
5919
Zhu Yib481de92007-09-25 17:54:57 -07005920 for (i = 0; i < MAX_HW_RESTARTS; i++) {
5921
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005922 iwl3945_clear_stations_table(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005923
5924 /* load bootstrap state machine,
5925 * load bootstrap program into processor's memory,
5926 * prepare to load the "initialize" uCode */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005927 rc = iwl3945_load_bsm(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005928
5929 if (rc) {
5930 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc);
5931 continue;
5932 }
5933
5934 /* start card; "initialize" will load runtime ucode */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005935 iwl3945_nic_start(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005936
Zhu Yib481de92007-09-25 17:54:57 -07005937 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
5938
5939 return 0;
5940 }
5941
5942 set_bit(STATUS_EXIT_PENDING, &priv->status);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005943 __iwl3945_down(priv);
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08005944 clear_bit(STATUS_EXIT_PENDING, &priv->status);
Zhu Yib481de92007-09-25 17:54:57 -07005945
5946 /* tried to restart and config the device for as long as our
5947 * patience could withstand */
5948 IWL_ERROR("Unable to initialize device after %d attempts.\n", i);
5949 return -EIO;
5950}
5951
5952
5953/*****************************************************************************
5954 *
5955 * Workqueue callbacks
5956 *
5957 *****************************************************************************/
5958
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005959static void iwl3945_bg_init_alive_start(struct work_struct *data)
Zhu Yib481de92007-09-25 17:54:57 -07005960{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005961 struct iwl3945_priv *priv =
5962 container_of(data, struct iwl3945_priv, init_alive_start.work);
Zhu Yib481de92007-09-25 17:54:57 -07005963
5964 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5965 return;
5966
5967 mutex_lock(&priv->mutex);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005968 iwl3945_init_alive_start(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005969 mutex_unlock(&priv->mutex);
5970}
5971
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005972static void iwl3945_bg_alive_start(struct work_struct *data)
Zhu Yib481de92007-09-25 17:54:57 -07005973{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005974 struct iwl3945_priv *priv =
5975 container_of(data, struct iwl3945_priv, alive_start.work);
Zhu Yib481de92007-09-25 17:54:57 -07005976
5977 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5978 return;
5979
5980 mutex_lock(&priv->mutex);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005981 iwl3945_alive_start(priv);
Zhu Yib481de92007-09-25 17:54:57 -07005982 mutex_unlock(&priv->mutex);
5983}
5984
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005985static void iwl3945_bg_rf_kill(struct work_struct *work)
Zhu Yib481de92007-09-25 17:54:57 -07005986{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005987 struct iwl3945_priv *priv = container_of(work, struct iwl3945_priv, rf_kill);
Zhu Yib481de92007-09-25 17:54:57 -07005988
5989 wake_up_interruptible(&priv->wait_command_queue);
5990
5991 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
5992 return;
5993
5994 mutex_lock(&priv->mutex);
5995
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08005996 if (!iwl3945_is_rfkill(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07005997 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
5998 "HW and/or SW RF Kill no longer active, restarting "
5999 "device\n");
6000 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6001 queue_work(priv->workqueue, &priv->restart);
6002 } else {
6003
6004 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
6005 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
6006 "disabled by SW switch\n");
6007 else
6008 IWL_WARNING("Radio Frequency Kill Switch is On:\n"
6009 "Kill switch must be turned off for "
6010 "wireless networking to work.\n");
6011 }
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08006012
Zhu Yib481de92007-09-25 17:54:57 -07006013 mutex_unlock(&priv->mutex);
Adel Gadllah80fcc9e2008-07-01 17:49:50 +02006014 iwl3945_rfkill_set_hw_state(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006015}
6016
6017#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
6018
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006019static void iwl3945_bg_scan_check(struct work_struct *data)
Zhu Yib481de92007-09-25 17:54:57 -07006020{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006021 struct iwl3945_priv *priv =
6022 container_of(data, struct iwl3945_priv, scan_check.work);
Zhu Yib481de92007-09-25 17:54:57 -07006023
6024 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6025 return;
6026
6027 mutex_lock(&priv->mutex);
6028 if (test_bit(STATUS_SCANNING, &priv->status) ||
6029 test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6030 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
6031 "Scan completion watchdog resetting adapter (%dms)\n",
6032 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
Mohamed Abbas15e869d2007-10-25 17:15:46 +08006033
Zhu Yib481de92007-09-25 17:54:57 -07006034 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006035 iwl3945_send_scan_abort(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006036 }
6037 mutex_unlock(&priv->mutex);
6038}
6039
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006040static void iwl3945_bg_request_scan(struct work_struct *data)
Zhu Yib481de92007-09-25 17:54:57 -07006041{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006042 struct iwl3945_priv *priv =
6043 container_of(data, struct iwl3945_priv, request_scan);
6044 struct iwl3945_host_cmd cmd = {
Zhu Yib481de92007-09-25 17:54:57 -07006045 .id = REPLY_SCAN_CMD,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006046 .len = sizeof(struct iwl3945_scan_cmd),
Zhu Yib481de92007-09-25 17:54:57 -07006047 .meta.flags = CMD_SIZE_HUGE,
6048 };
6049 int rc = 0;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006050 struct iwl3945_scan_cmd *scan;
Zhu Yib481de92007-09-25 17:54:57 -07006051 struct ieee80211_conf *conf = NULL;
Abhijeet Kolekarf9340522008-09-03 11:26:58 +08006052 u8 n_probes = 2;
Johannes Berg8318d782008-01-24 19:38:38 +01006053 enum ieee80211_band band;
John W. Linville9387b7c2008-09-30 20:59:05 -04006054 DECLARE_SSID_BUF(ssid);
Zhu Yib481de92007-09-25 17:54:57 -07006055
6056 conf = ieee80211_get_hw_conf(priv->hw);
6057
6058 mutex_lock(&priv->mutex);
6059
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006060 if (!iwl3945_is_ready(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07006061 IWL_WARNING("request scan called when driver not ready.\n");
6062 goto done;
6063 }
6064
Tomas Winklera96a27f2008-10-23 23:48:56 -07006065 /* Make sure the scan wasn't canceled before this queued work
Zhu Yib481de92007-09-25 17:54:57 -07006066 * was given the chance to run... */
6067 if (!test_bit(STATUS_SCANNING, &priv->status))
6068 goto done;
6069
6070 /* This should never be called or scheduled if there is currently
6071 * a scan active in the hardware. */
6072 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
6073		IWL_DEBUG_INFO("Multiple concurrent scan requests.  "
6074 "Ignoring second request.\n");
6075 rc = -EIO;
6076 goto done;
6077 }
6078
6079 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6080 IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n");
6081 goto done;
6082 }
6083
6084 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
6085 IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6086 goto done;
6087 }
6088
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006089 if (iwl3945_is_rfkill(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07006090 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6091 goto done;
6092 }
6093
6094 if (!test_bit(STATUS_READY, &priv->status)) {
6095 IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n");
6096 goto done;
6097 }
6098
6099 if (!priv->scan_bands) {
6100 IWL_DEBUG_HC("Aborting scan due to no requested bands\n");
6101 goto done;
6102 }
6103
6104 if (!priv->scan) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006105 priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) +
Zhu Yib481de92007-09-25 17:54:57 -07006106 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
6107 if (!priv->scan) {
6108 rc = -ENOMEM;
6109 goto done;
6110 }
6111 }
6112 scan = priv->scan;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006113 memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
Zhu Yib481de92007-09-25 17:54:57 -07006114
6115 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
6116 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
6117
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006118 if (iwl3945_is_associated(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07006119 u16 interval = 0;
6120 u32 extra;
6121 u32 suspend_time = 100;
6122 u32 scan_suspend_time = 100;
6123 unsigned long flags;
6124
6125 IWL_DEBUG_INFO("Scanning while associated...\n");
6126
6127 spin_lock_irqsave(&priv->lock, flags);
6128 interval = priv->beacon_int;
6129 spin_unlock_irqrestore(&priv->lock, flags);
6130
6131 scan->suspend_time = 0;
Mohamed Abbas15e869d2007-10-25 17:15:46 +08006132 scan->max_out_time = cpu_to_le32(200 * 1024);
Zhu Yib481de92007-09-25 17:54:57 -07006133 if (!interval)
6134 interval = suspend_time;
6135 /*
6136 * suspend time format:
6137 * 0-19: beacon interval in usec (time before exec.)
6138 * 20-23: 0
6139 * 24-31: number of beacons (suspend between channels)
6140 */
6141
6142 extra = (suspend_time / interval) << 24;
6143 scan_suspend_time = 0xFF0FFFFF &
6144 (extra | ((suspend_time % interval) * 1024));
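		/* Worked example (illustrative numbers): with suspend_time = 100
		 * and a beacon interval of 100, extra = (100 / 100) << 24 =
		 * 0x01000000 and the remainder term is 0, so scan_suspend_time =
		 * 0x01000000 -- i.e. suspend for one beacon between channels,
		 * with no extra time before execution. */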
6145
6146 scan->suspend_time = cpu_to_le32(scan_suspend_time);
6147 IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n",
6148 scan_suspend_time, interval);
6149 }
6150
6151 /* We should add the ability for user to lock to PASSIVE ONLY */
6152 if (priv->one_direct_scan) {
6153 IWL_DEBUG_SCAN
6154 ("Kicking off one direct scan for '%s'\n",
John W. Linville9387b7c2008-09-30 20:59:05 -04006155 print_ssid(ssid, priv->direct_ssid,
6156 priv->direct_ssid_len));
Zhu Yib481de92007-09-25 17:54:57 -07006157 scan->direct_scan[0].id = WLAN_EID_SSID;
6158 scan->direct_scan[0].len = priv->direct_ssid_len;
6159 memcpy(scan->direct_scan[0].ssid,
6160 priv->direct_ssid, priv->direct_ssid_len);
Abhijeet Kolekarf9340522008-09-03 11:26:58 +08006161 n_probes++;
Abhijeet Kolekarf9340522008-09-03 11:26:58 +08006162 } else
Bill Moss786b4552008-04-17 16:03:40 -07006163 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n");
Zhu Yib481de92007-09-25 17:54:57 -07006164
6165 /* We don't build a direct scan probe request; the uCode will do
6166 * that based on the direct_mask added to each channel entry */
6167 scan->tx_cmd.len = cpu_to_le16(
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006168 iwl3945_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
Johannes Berg430cfe92008-10-28 18:06:02 +01006169 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
Zhu Yib481de92007-09-25 17:54:57 -07006170 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
6171 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id;
6172 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
6173
6174 /* flags + rate selection */
6175
Ron Rindjunsky66b50042008-06-25 16:46:31 +08006176 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
Zhu Yib481de92007-09-25 17:54:57 -07006177 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
6178 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
6179 scan->good_CRC_th = 0;
Johannes Berg8318d782008-01-24 19:38:38 +01006180 band = IEEE80211_BAND_2GHZ;
Ron Rindjunsky66b50042008-06-25 16:46:31 +08006181 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
Zhu Yib481de92007-09-25 17:54:57 -07006182 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
6183 scan->good_CRC_th = IWL_GOOD_CRC_TH;
Johannes Berg8318d782008-01-24 19:38:38 +01006184 band = IEEE80211_BAND_5GHZ;
Ron Rindjunsky66b50042008-06-25 16:46:31 +08006185 } else {
Zhu Yib481de92007-09-25 17:54:57 -07006186 IWL_WARNING("Invalid scan band count\n");
6187 goto done;
6188 }
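	/* Note: the probe request goes out at the lowest mandatory rate of
	 * the chosen band (1 Mbps CCK on 2.4 GHz, 6 Mbps OFDM on 5 GHz),
	 * which presumably gives it the best chance of being heard. */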
6189
6190 /* select Rx antennas */
6191 scan->flags |= iwl3945_get_antenna_flags(priv);
6192
Johannes Berg05c914f2008-09-11 00:01:58 +02006193 if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
Zhu Yib481de92007-09-25 17:54:57 -07006194 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
6195
Abhijeet Kolekarf9340522008-09-03 11:26:58 +08006196 scan->channel_count =
6197 iwl3945_get_channels_for_scan(priv, band, 1, /* active */
6198 n_probes,
6199 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
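	/* Note: the channel list lands in the same scan->data buffer,
	 * immediately after the probe request built above, which is why the
	 * destination pointer is offset by tx_cmd.len bytes. */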
Zhu Yib481de92007-09-25 17:54:57 -07006200
Reinette Chatre14b54332008-11-04 12:21:35 -08006201 if (scan->channel_count == 0) {
6202 IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count);
6203 goto done;
6204 }
6205
Zhu Yib481de92007-09-25 17:54:57 -07006206 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006207 scan->channel_count * sizeof(struct iwl3945_scan_channel);
Zhu Yib481de92007-09-25 17:54:57 -07006208 cmd.data = scan;
6209 scan->len = cpu_to_le16(cmd.len);
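	/* Note: cmd.len now also covers the variable-length probe request and
	 * one iwl3945_scan_channel entry per channel, on top of the fixed
	 * command header accounted for earlier; scan->len carries the same
	 * total for the uCode. */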
6210
6211 set_bit(STATUS_SCAN_HW, &priv->status);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006212 rc = iwl3945_send_cmd_sync(priv, &cmd);
Zhu Yib481de92007-09-25 17:54:57 -07006213 if (rc)
6214 goto done;
6215
6216 queue_delayed_work(priv->workqueue, &priv->scan_check,
6217 IWL_SCAN_CHECK_WATCHDOG);
6218
6219 mutex_unlock(&priv->mutex);
6220 return;
6221
6222 done:
Mohamed Abbas2420ebc2008-11-04 12:21:34 -08006223	/* Cannot perform scan. Make sure we clear the scanning
6224	 * bits from the status so the next scan request can be performed.
6225	 * If we don't clear the scanning status bits here, all
6226	 * subsequent scan requests will fail.
6227	 */
6228 clear_bit(STATUS_SCAN_HW, &priv->status);
6229 clear_bit(STATUS_SCANNING, &priv->status);
6230
Ian Schram01ebd062007-10-25 17:15:22 +08006231	/* inform mac80211 that the scan was aborted */
Zhu Yib481de92007-09-25 17:54:57 -07006232 queue_work(priv->workqueue, &priv->scan_completed);
6233 mutex_unlock(&priv->mutex);
6234}
6235
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006236static void iwl3945_bg_up(struct work_struct *data)
Zhu Yib481de92007-09-25 17:54:57 -07006237{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006238 struct iwl3945_priv *priv = container_of(data, struct iwl3945_priv, up);
Zhu Yib481de92007-09-25 17:54:57 -07006239
6240 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6241 return;
6242
6243 mutex_lock(&priv->mutex);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006244 __iwl3945_up(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006245 mutex_unlock(&priv->mutex);
Adel Gadllah80fcc9e2008-07-01 17:49:50 +02006246 iwl3945_rfkill_set_hw_state(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006247}
6248
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006249static void iwl3945_bg_restart(struct work_struct *data)
Zhu Yib481de92007-09-25 17:54:57 -07006250{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006251 struct iwl3945_priv *priv = container_of(data, struct iwl3945_priv, restart);
Zhu Yib481de92007-09-25 17:54:57 -07006252
6253 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6254 return;
6255
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006256 iwl3945_down(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006257 queue_work(priv->workqueue, &priv->up);
6258}
6259
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006260static void iwl3945_bg_rx_replenish(struct work_struct *data)
Zhu Yib481de92007-09-25 17:54:57 -07006261{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006262 struct iwl3945_priv *priv =
6263 container_of(data, struct iwl3945_priv, rx_replenish);
Zhu Yib481de92007-09-25 17:54:57 -07006264
6265 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6266 return;
6267
6268 mutex_lock(&priv->mutex);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006269 iwl3945_rx_replenish(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006270 mutex_unlock(&priv->mutex);
6271}
6272
Mohamed Abbas7878a5a2007-11-29 11:10:13 +08006273#define IWL_DELAY_NEXT_SCAN (HZ*2)
6274
Abhijeet Kolekarcd56d332008-09-03 11:26:21 +08006275static void iwl3945_post_associate(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07006276{
Zhu Yib481de92007-09-25 17:54:57 -07006277 int rc = 0;
6278 struct ieee80211_conf *conf = NULL;
6279
Johannes Berg05c914f2008-09-11 00:01:58 +02006280 if (priv->iw_mode == NL80211_IFTYPE_AP) {
Tomas Winkler3ac7f142008-07-21 02:40:14 +03006281 IWL_ERROR("%s Should not be called in AP mode\n", __func__);
Zhu Yib481de92007-09-25 17:54:57 -07006282 return;
6283 }
6284
6285
Johannes Berge1749612008-10-27 15:59:26 -07006286 IWL_DEBUG_ASSOC("Associated as %d to: %pM\n",
6287 priv->assoc_id, priv->active_rxon.bssid_addr);
Zhu Yib481de92007-09-25 17:54:57 -07006288
6289 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6290 return;
6291
Abhijeet Kolekar322a9812008-09-03 11:26:27 +08006292 if (!priv->vif || !priv->is_open)
Mohamed Abbas6ef89d02007-10-25 17:15:47 +08006293 return;
Abhijeet Kolekar322a9812008-09-03 11:26:27 +08006294
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006295 iwl3945_scan_cancel_timeout(priv, 200);
Mohamed Abbas15e869d2007-10-25 17:15:46 +08006296
Zhu Yib481de92007-09-25 17:54:57 -07006297 conf = ieee80211_get_hw_conf(priv->hw);
6298
6299 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006300 iwl3945_commit_rxon(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006301
Tomas Winkler28afaf92008-12-19 10:37:06 +08006302 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006303 iwl3945_setup_rxon_timing(priv);
6304 rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON_TIMING,
Zhu Yib481de92007-09-25 17:54:57 -07006305 sizeof(priv->rxon_timing), &priv->rxon_timing);
6306 if (rc)
6307 IWL_WARNING("REPLY_RXON_TIMING failed - "
6308 "Attempting to continue.\n");
6309
6310 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
6311
6312 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
6313
6314 IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n",
6315 priv->assoc_id, priv->beacon_int);
6316
6317 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
6318 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
6319 else
6320 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
6321
6322 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
6323 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
6324 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
6325 else
6326 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
6327
Johannes Berg05c914f2008-09-11 00:01:58 +02006328 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
Zhu Yib481de92007-09-25 17:54:57 -07006329 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
6330
6331 }
6332
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006333 iwl3945_commit_rxon(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006334
6335 switch (priv->iw_mode) {
Johannes Berg05c914f2008-09-11 00:01:58 +02006336 case NL80211_IFTYPE_STATION:
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006337 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
Zhu Yib481de92007-09-25 17:54:57 -07006338 break;
6339
Johannes Berg05c914f2008-09-11 00:01:58 +02006340 case NL80211_IFTYPE_ADHOC:
Zhu Yib481de92007-09-25 17:54:57 -07006341
Abhijeet Kolekarce546fd2008-11-19 15:32:22 -08006342 priv->assoc_id = 1;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006343 iwl3945_add_station(priv, priv->bssid, 0, 0);
Zhu Yib481de92007-09-25 17:54:57 -07006344 iwl3945_sync_sta(priv, IWL_STA_ID,
Johannes Berg8318d782008-01-24 19:38:38 +01006345 (priv->band == IEEE80211_BAND_5GHZ) ?
Zhu Yib481de92007-09-25 17:54:57 -07006346 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
6347 CMD_ASYNC);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006348 iwl3945_rate_scale_init(priv->hw, IWL_STA_ID);
6349 iwl3945_send_beacon_cmd(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006350
6351 break;
6352
6353 default:
6354 IWL_ERROR("%s Should not be called in %d mode\n",
Tomas Winkler3ac7f142008-07-21 02:40:14 +03006355 __func__, priv->iw_mode);
Zhu Yib481de92007-09-25 17:54:57 -07006356 break;
6357 }
6358
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006359 iwl3945_activate_qos(priv, 0);
Ron Rindjunsky292ae172008-02-06 11:20:39 -08006360
Mohamed Abbas7878a5a2007-11-29 11:10:13 +08006361 /* we have just associated, don't start scan too early */
6362 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
Abhijeet Kolekarcd56d332008-09-03 11:26:21 +08006363}
6364
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006365static void iwl3945_bg_abort_scan(struct work_struct *work)
Zhu Yib481de92007-09-25 17:54:57 -07006366{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006367 struct iwl3945_priv *priv = container_of(work, struct iwl3945_priv, abort_scan);
Zhu Yib481de92007-09-25 17:54:57 -07006368
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006369 if (!iwl3945_is_ready(priv))
Zhu Yib481de92007-09-25 17:54:57 -07006370 return;
6371
6372 mutex_lock(&priv->mutex);
6373
6374 set_bit(STATUS_SCAN_ABORTING, &priv->status);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006375 iwl3945_send_scan_abort(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006376
6377 mutex_unlock(&priv->mutex);
6378}
6379
Johannes Berge8975582008-10-09 12:18:51 +02006380static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed);
Zhu Yi76bb77e2007-11-22 10:53:22 +08006381
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006382static void iwl3945_bg_scan_completed(struct work_struct *work)
Zhu Yib481de92007-09-25 17:54:57 -07006383{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006384 struct iwl3945_priv *priv =
6385 container_of(work, struct iwl3945_priv, scan_completed);
Zhu Yib481de92007-09-25 17:54:57 -07006386
6387	IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete\n");
6388
6389 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6390 return;
6391
Zhu Yia0646472007-12-20 14:10:01 +08006392 if (test_bit(STATUS_CONF_PENDING, &priv->status))
Johannes Berge8975582008-10-09 12:18:51 +02006393 iwl3945_mac_config(priv->hw, 0);
Zhu Yi76bb77e2007-11-22 10:53:22 +08006394
Zhu Yib481de92007-09-25 17:54:57 -07006395 ieee80211_scan_completed(priv->hw);
6396
6397 /* Since setting the TXPOWER may have been deferred while
6398 * performing the scan, fire one off */
6399 mutex_lock(&priv->mutex);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006400 iwl3945_hw_reg_send_txpower(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006401 mutex_unlock(&priv->mutex);
6402}
6403
6404/*****************************************************************************
6405 *
6406 * mac80211 entry point functions
6407 *
6408 *****************************************************************************/
6409
Zhu Yi5a669262008-01-14 17:46:18 -08006410#define UCODE_READY_TIMEOUT (2 * HZ)
6411
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006412static int iwl3945_mac_start(struct ieee80211_hw *hw)
Zhu Yib481de92007-09-25 17:54:57 -07006413{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006414 struct iwl3945_priv *priv = hw->priv;
Zhu Yi5a669262008-01-14 17:46:18 -08006415 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07006416
6417 IWL_DEBUG_MAC80211("enter\n");
6418
Zhu Yi5a669262008-01-14 17:46:18 -08006419 if (pci_enable_device(priv->pci_dev)) {
6420 IWL_ERROR("Fail to pci_enable_device\n");
6421 return -ENODEV;
6422 }
6423 pci_restore_state(priv->pci_dev);
6424 pci_enable_msi(priv->pci_dev);
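	/* Note: the pci_enable_msi() return value is ignored here; if MSI
	 * cannot be enabled the device presumably keeps using legacy INTx,
	 * which the IRQF_SHARED handler registered below also serves. */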
6425
6426 ret = request_irq(priv->pci_dev->irq, iwl3945_isr, IRQF_SHARED,
6427 DRV_NAME, priv);
6428 if (ret) {
6429 IWL_ERROR("Error allocating IRQ %d\n", priv->pci_dev->irq);
6430 goto out_disable_msi;
6431 }
6432
Zhu Yib481de92007-09-25 17:54:57 -07006433 /* we should be verifying the device is ready to be opened */
6434 mutex_lock(&priv->mutex);
6435
Zhu Yi5a669262008-01-14 17:46:18 -08006436 memset(&priv->staging_rxon, 0, sizeof(struct iwl3945_rxon_cmd));
6437 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
6438 * ucode filename and max sizes are card-specific. */
6439
6440 if (!priv->ucode_code.len) {
6441 ret = iwl3945_read_ucode(priv);
6442 if (ret) {
6443 IWL_ERROR("Could not read microcode: %d\n", ret);
6444 mutex_unlock(&priv->mutex);
6445 goto out_release_irq;
6446 }
6447 }
6448
Zhu Yie655b9f2008-01-24 02:19:38 -08006449 ret = __iwl3945_up(priv);
Zhu Yi5a669262008-01-14 17:46:18 -08006450
Zhu Yib481de92007-09-25 17:54:57 -07006451 mutex_unlock(&priv->mutex);
Zhu Yi5a669262008-01-14 17:46:18 -08006452
Adel Gadllah80fcc9e2008-07-01 17:49:50 +02006453 iwl3945_rfkill_set_hw_state(priv);
6454
Zhu Yie655b9f2008-01-24 02:19:38 -08006455 if (ret)
6456 goto out_release_irq;
6457
6458 IWL_DEBUG_INFO("Start UP work.\n");
6459
6460 if (test_bit(STATUS_IN_SUSPEND, &priv->status))
6461 return 0;
6462
Zhu Yi5a669262008-01-14 17:46:18 -08006463 /* Wait for START_ALIVE from ucode. Otherwise callbacks from
6464 * mac80211 will not be run successfully. */
6465 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
6466 test_bit(STATUS_READY, &priv->status),
6467 UCODE_READY_TIMEOUT);
6468 if (!ret) {
6469 if (!test_bit(STATUS_READY, &priv->status)) {
6470 IWL_ERROR("Wait for START_ALIVE timeout after %dms.\n",
6471 jiffies_to_msecs(UCODE_READY_TIMEOUT));
6472 ret = -ETIMEDOUT;
6473 goto out_release_irq;
6474 }
6475 }
6476
Zhu Yie655b9f2008-01-24 02:19:38 -08006477 priv->is_open = 1;
Zhu Yib481de92007-09-25 17:54:57 -07006478 IWL_DEBUG_MAC80211("leave\n");
6479 return 0;
Zhu Yi5a669262008-01-14 17:46:18 -08006480
6481out_release_irq:
6482 free_irq(priv->pci_dev->irq, priv);
6483out_disable_msi:
6484 pci_disable_msi(priv->pci_dev);
Zhu Yie655b9f2008-01-24 02:19:38 -08006485 pci_disable_device(priv->pci_dev);
6486 priv->is_open = 0;
6487 IWL_DEBUG_MAC80211("leave - failed\n");
Zhu Yi5a669262008-01-14 17:46:18 -08006488 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07006489}
6490
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006491static void iwl3945_mac_stop(struct ieee80211_hw *hw)
Zhu Yib481de92007-09-25 17:54:57 -07006492{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006493 struct iwl3945_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07006494
6495 IWL_DEBUG_MAC80211("enter\n");
Mohamed Abbas6ef89d02007-10-25 17:15:47 +08006496
Zhu Yie655b9f2008-01-24 02:19:38 -08006497 if (!priv->is_open) {
6498 IWL_DEBUG_MAC80211("leave - skip\n");
6499 return;
6500 }
6501
Zhu Yib481de92007-09-25 17:54:57 -07006502 priv->is_open = 0;
Zhu Yi5a669262008-01-14 17:46:18 -08006503
6504 if (iwl3945_is_ready_rf(priv)) {
Zhu Yie655b9f2008-01-24 02:19:38 -08006505	/* stop the MAC, cancel any pending scan request, and clear
6506	 * the RXON_FILTER_ASSOC_MSK bit
6507	 */
Zhu Yi5a669262008-01-14 17:46:18 -08006508 mutex_lock(&priv->mutex);
6509 iwl3945_scan_cancel_timeout(priv, 100);
Mohamed Abbasfde35712007-11-29 11:10:15 +08006510 mutex_unlock(&priv->mutex);
Mohamed Abbasfde35712007-11-29 11:10:15 +08006511 }
6512
Zhu Yi5a669262008-01-14 17:46:18 -08006513 iwl3945_down(priv);
6514
6515 flush_workqueue(priv->workqueue);
6516 free_irq(priv->pci_dev->irq, priv);
6517 pci_disable_msi(priv->pci_dev);
6518 pci_save_state(priv->pci_dev);
6519 pci_disable_device(priv->pci_dev);
Mohamed Abbas6ef89d02007-10-25 17:15:47 +08006520
Zhu Yib481de92007-09-25 17:54:57 -07006521 IWL_DEBUG_MAC80211("leave\n");
Zhu Yib481de92007-09-25 17:54:57 -07006522}
6523
Johannes Berge039fa42008-05-15 12:55:29 +02006524static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
Zhu Yib481de92007-09-25 17:54:57 -07006525{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006526 struct iwl3945_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07006527
6528 IWL_DEBUG_MAC80211("enter\n");
6529
Zhu Yib481de92007-09-25 17:54:57 -07006530 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
Johannes Berge039fa42008-05-15 12:55:29 +02006531 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
Zhu Yib481de92007-09-25 17:54:57 -07006532
Johannes Berge039fa42008-05-15 12:55:29 +02006533 if (iwl3945_tx_skb(priv, skb))
Zhu Yib481de92007-09-25 17:54:57 -07006534 dev_kfree_skb_any(skb);
6535
6536 IWL_DEBUG_MAC80211("leave\n");
Reinette Chatre637f8832009-01-19 15:30:32 -08006537 return NETDEV_TX_OK;
Zhu Yib481de92007-09-25 17:54:57 -07006538}
6539
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006540static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
Zhu Yib481de92007-09-25 17:54:57 -07006541 struct ieee80211_if_init_conf *conf)
6542{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006543 struct iwl3945_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07006544 unsigned long flags;
6545
Johannes Berg32bfd352007-12-19 01:31:26 +01006546 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type);
Zhu Yib481de92007-09-25 17:54:57 -07006547
Johannes Berg32bfd352007-12-19 01:31:26 +01006548 if (priv->vif) {
6549 IWL_DEBUG_MAC80211("leave - vif != NULL\n");
Tomas Winkler864792e2007-11-27 21:00:52 +02006550 return -EOPNOTSUPP;
Zhu Yib481de92007-09-25 17:54:57 -07006551 }
6552
6553 spin_lock_irqsave(&priv->lock, flags);
Johannes Berg32bfd352007-12-19 01:31:26 +01006554 priv->vif = conf->vif;
Zhu, Yi60294de2008-10-29 14:05:45 -07006555 priv->iw_mode = conf->type;
Zhu Yib481de92007-09-25 17:54:57 -07006556
6557 spin_unlock_irqrestore(&priv->lock, flags);
6558
6559 mutex_lock(&priv->mutex);
Tomas Winkler864792e2007-11-27 21:00:52 +02006560
6561 if (conf->mac_addr) {
Johannes Berge1749612008-10-27 15:59:26 -07006562 IWL_DEBUG_MAC80211("Set: %pM\n", conf->mac_addr);
Tomas Winkler864792e2007-11-27 21:00:52 +02006563 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
6564 }
6565
Zhu Yi5a669262008-01-14 17:46:18 -08006566 if (iwl3945_is_ready(priv))
6567 iwl3945_set_mode(priv, conf->type);
Zhu Yib481de92007-09-25 17:54:57 -07006568
Zhu Yib481de92007-09-25 17:54:57 -07006569 mutex_unlock(&priv->mutex);
6570
Zhu Yi5a669262008-01-14 17:46:18 -08006571 IWL_DEBUG_MAC80211("leave\n");
Zhu Yib481de92007-09-25 17:54:57 -07006572 return 0;
6573}
6574
6575/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006576 * iwl3945_mac_config - mac80211 config callback
Zhu Yib481de92007-09-25 17:54:57 -07006577 *
6578 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
6579 * be set inappropriately and the driver currently sets the hardware up to
6580 * use it whenever needed.
6581 */
Johannes Berge8975582008-10-09 12:18:51 +02006582static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed)
Zhu Yib481de92007-09-25 17:54:57 -07006583{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006584 struct iwl3945_priv *priv = hw->priv;
6585 const struct iwl3945_channel_info *ch_info;
Johannes Berge8975582008-10-09 12:18:51 +02006586 struct ieee80211_conf *conf = &hw->conf;
Zhu Yib481de92007-09-25 17:54:57 -07006587 unsigned long flags;
Zhu Yi76bb77e2007-11-22 10:53:22 +08006588 int ret = 0;
Zhu Yib481de92007-09-25 17:54:57 -07006589
6590 mutex_lock(&priv->mutex);
Johannes Berg8318d782008-01-24 19:38:38 +01006591 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
Zhu Yib481de92007-09-25 17:54:57 -07006592
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006593 if (!iwl3945_is_ready(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07006594 IWL_DEBUG_MAC80211("leave - not ready\n");
Zhu Yi76bb77e2007-11-22 10:53:22 +08006595 ret = -EIO;
6596 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07006597 }
6598
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006599 if (unlikely(!iwl3945_param_disable_hw_scan &&
Zhu Yib481de92007-09-25 17:54:57 -07006600 test_bit(STATUS_SCANNING, &priv->status))) {
Zhu Yia0646472007-12-20 14:10:01 +08006601 IWL_DEBUG_MAC80211("leave - scanning\n");
6602 set_bit(STATUS_CONF_PENDING, &priv->status);
Zhu Yib481de92007-09-25 17:54:57 -07006603 mutex_unlock(&priv->mutex);
Zhu Yia0646472007-12-20 14:10:01 +08006604 return 0;
Zhu Yib481de92007-09-25 17:54:57 -07006605 }
6606
6607 spin_lock_irqsave(&priv->lock, flags);
6608
Johannes Berg8318d782008-01-24 19:38:38 +01006609 ch_info = iwl3945_get_channel_info(priv, conf->channel->band,
6610 conf->channel->hw_value);
Zhu Yib481de92007-09-25 17:54:57 -07006611 if (!is_channel_valid(ch_info)) {
Ron Rindjunsky66b50042008-06-25 16:46:31 +08006612 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this band.\n",
Johannes Berg8318d782008-01-24 19:38:38 +01006613 conf->channel->hw_value, conf->channel->band);
Zhu Yib481de92007-09-25 17:54:57 -07006614 IWL_DEBUG_MAC80211("leave - invalid channel\n");
6615 spin_unlock_irqrestore(&priv->lock, flags);
Zhu Yi76bb77e2007-11-22 10:53:22 +08006616 ret = -EINVAL;
6617 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07006618 }
6619
Johannes Berg8318d782008-01-24 19:38:38 +01006620 iwl3945_set_rxon_channel(priv, conf->channel->band, conf->channel->hw_value);
Zhu Yib481de92007-09-25 17:54:57 -07006621
Johannes Berg8318d782008-01-24 19:38:38 +01006622 iwl3945_set_flags_for_phymode(priv, conf->channel->band);
Zhu Yib481de92007-09-25 17:54:57 -07006623
6624 /* The list of supported rates and rate mask can be different
6625 * for each phymode; since the phymode may have changed, reset
6626 * the rate mask to what mac80211 lists */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006627 iwl3945_set_rate(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006628
6629 spin_unlock_irqrestore(&priv->lock, flags);
6630
6631#ifdef IEEE80211_CONF_CHANNEL_SWITCH
6632 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006633 iwl3945_hw_channel_switch(priv, conf->channel);
Zhu Yi76bb77e2007-11-22 10:53:22 +08006634 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07006635 }
6636#endif
6637
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006638 iwl3945_radio_kill_sw(priv, !conf->radio_enabled);
Zhu Yib481de92007-09-25 17:54:57 -07006639
6640 if (!conf->radio_enabled) {
6641 IWL_DEBUG_MAC80211("leave - radio disabled\n");
Zhu Yi76bb77e2007-11-22 10:53:22 +08006642 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07006643 }
6644
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006645 if (iwl3945_is_rfkill(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07006646 IWL_DEBUG_MAC80211("leave - RF kill\n");
Zhu Yi76bb77e2007-11-22 10:53:22 +08006647 ret = -EIO;
6648 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07006649 }
6650
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006651 iwl3945_set_rate(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006652
6653 if (memcmp(&priv->active_rxon,
6654 &priv->staging_rxon, sizeof(priv->staging_rxon)))
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006655 iwl3945_commit_rxon(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006656 else
6657 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n");
6658
6659 IWL_DEBUG_MAC80211("leave\n");
6660
Zhu Yi76bb77e2007-11-22 10:53:22 +08006661out:
Zhu Yia0646472007-12-20 14:10:01 +08006662 clear_bit(STATUS_CONF_PENDING, &priv->status);
Zhu Yib481de92007-09-25 17:54:57 -07006663 mutex_unlock(&priv->mutex);
Zhu Yi76bb77e2007-11-22 10:53:22 +08006664 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07006665}
6666
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006667static void iwl3945_config_ap(struct iwl3945_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07006668{
6669 int rc = 0;
6670
Maarten Lankhorstd986bcd2008-01-23 10:15:16 -08006671 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
Zhu Yib481de92007-09-25 17:54:57 -07006672 return;
6673
6674 /* The following should be done only at AP bring up */
Ron Rindjunsky5d1e2322008-06-30 17:23:04 +08006675 if (!(iwl3945_is_associated(priv))) {
Zhu Yib481de92007-09-25 17:54:57 -07006676
6677 /* RXON - unassoc (to set timing command) */
6678 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006679 iwl3945_commit_rxon(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006680
6681 /* RXON Timing */
Tomas Winkler28afaf92008-12-19 10:37:06 +08006682 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006683 iwl3945_setup_rxon_timing(priv);
6684 rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON_TIMING,
Zhu Yib481de92007-09-25 17:54:57 -07006685 sizeof(priv->rxon_timing), &priv->rxon_timing);
6686 if (rc)
6687 IWL_WARNING("REPLY_RXON_TIMING failed - "
6688 "Attempting to continue.\n");
6689
6690 /* FIXME: what should be the assoc_id for AP? */
6691 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
6692 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
6693 priv->staging_rxon.flags |=
6694 RXON_FLG_SHORT_PREAMBLE_MSK;
6695 else
6696 priv->staging_rxon.flags &=
6697 ~RXON_FLG_SHORT_PREAMBLE_MSK;
6698
6699 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
6700 if (priv->assoc_capability &
6701 WLAN_CAPABILITY_SHORT_SLOT_TIME)
6702 priv->staging_rxon.flags |=
6703 RXON_FLG_SHORT_SLOT_MSK;
6704 else
6705 priv->staging_rxon.flags &=
6706 ~RXON_FLG_SHORT_SLOT_MSK;
6707
Johannes Berg05c914f2008-09-11 00:01:58 +02006708 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
Zhu Yib481de92007-09-25 17:54:57 -07006709 priv->staging_rxon.flags &=
6710 ~RXON_FLG_SHORT_SLOT_MSK;
6711 }
6712 /* restore RXON assoc */
6713 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006714 iwl3945_commit_rxon(priv);
6715 iwl3945_add_station(priv, iwl3945_broadcast_addr, 0, 0);
Zhu Yi556f8db2007-09-27 11:27:33 +08006716 }
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006717 iwl3945_send_beacon_cmd(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006718
6719 /* FIXME - we need to add code here to detect a totally new
6720 * configuration, reset the AP, unassoc, rxon timing, assoc,
6721 * clear sta table, add BCAST sta... */
6722}
6723
Johannes Berg32bfd352007-12-19 01:31:26 +01006724static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6725 struct ieee80211_vif *vif,
Zhu Yib481de92007-09-25 17:54:57 -07006726 struct ieee80211_if_conf *conf)
6727{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006728 struct iwl3945_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07006729 int rc;
6730
6731 if (conf == NULL)
6732 return -EIO;
6733
Emmanuel Grumbachb716bb92008-03-04 18:09:32 -08006734 if (priv->vif != vif) {
6735 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
Emmanuel Grumbachb716bb92008-03-04 18:09:32 -08006736 return 0;
6737 }
6738
Johannes Berg9d139c82008-07-09 14:40:37 +02006739 /* handle this temporarily here */
Johannes Berg05c914f2008-09-11 00:01:58 +02006740 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
Johannes Berg9d139c82008-07-09 14:40:37 +02006741 conf->changed & IEEE80211_IFCC_BEACON) {
6742 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
6743 if (!beacon)
6744 return -ENOMEM;
Mohamed Abbas9bdf5ec2008-11-07 09:58:35 -08006745 mutex_lock(&priv->mutex);
Johannes Berg9d139c82008-07-09 14:40:37 +02006746 rc = iwl3945_mac_beacon_update(hw, beacon);
Mohamed Abbas9bdf5ec2008-11-07 09:58:35 -08006747 mutex_unlock(&priv->mutex);
Johannes Berg9d139c82008-07-09 14:40:37 +02006748 if (rc)
6749 return rc;
6750 }
6751
Zhu Yi5a669262008-01-14 17:46:18 -08006752 if (!iwl3945_is_alive(priv))
6753 return -EAGAIN;
6754
Zhu Yib481de92007-09-25 17:54:57 -07006755 mutex_lock(&priv->mutex);
6756
Zhu Yib481de92007-09-25 17:54:57 -07006757 if (conf->bssid)
Johannes Berge1749612008-10-27 15:59:26 -07006758 IWL_DEBUG_MAC80211("bssid: %pM\n", conf->bssid);
Zhu Yib481de92007-09-25 17:54:57 -07006759
Johannes Berg4150c572007-09-17 01:29:23 -04006760/*
6761 * very dubious code was here; the probe filtering flag is never set:
6762 *
Zhu Yib481de92007-09-25 17:54:57 -07006763 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
6764 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
Johannes Berg4150c572007-09-17 01:29:23 -04006765 */
Zhu Yib481de92007-09-25 17:54:57 -07006766
Johannes Berg05c914f2008-09-11 00:01:58 +02006767 if (priv->iw_mode == NL80211_IFTYPE_AP) {
Zhu Yib481de92007-09-25 17:54:57 -07006768 if (!conf->bssid) {
6769 conf->bssid = priv->mac_addr;
6770 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
Johannes Berge1749612008-10-27 15:59:26 -07006771 IWL_DEBUG_MAC80211("bssid was set to: %pM\n",
6772 conf->bssid);
Zhu Yib481de92007-09-25 17:54:57 -07006773 }
6774 if (priv->ibss_beacon)
6775 dev_kfree_skb(priv->ibss_beacon);
6776
Johannes Berg9d139c82008-07-09 14:40:37 +02006777 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
Zhu Yib481de92007-09-25 17:54:57 -07006778 }
6779
Mohamed Abbasfde35712007-11-29 11:10:15 +08006780 if (iwl3945_is_rfkill(priv))
6781 goto done;
6782
Zhu Yib481de92007-09-25 17:54:57 -07006783 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
6784 !is_multicast_ether_addr(conf->bssid)) {
6785 /* If there is currently a HW scan going on in the background
6786 * then we need to cancel it else the RXON below will fail. */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006787 if (iwl3945_scan_cancel_timeout(priv, 100)) {
Zhu Yib481de92007-09-25 17:54:57 -07006788 IWL_WARNING("Aborted scan still in progress "
6789 "after 100ms\n");
6790 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
6791 mutex_unlock(&priv->mutex);
6792 return -EAGAIN;
6793 }
6794 memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN);
6795
6796 /* TODO: Audit driver for usage of these members and see
6797 * if mac80211 deprecates them (priv->bssid looks like it
6798 * shouldn't be there, but I haven't scanned the IBSS code
6799 * to verify) - jpk */
6800 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
6801
Johannes Berg05c914f2008-09-11 00:01:58 +02006802 if (priv->iw_mode == NL80211_IFTYPE_AP)
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006803 iwl3945_config_ap(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006804 else {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006805 rc = iwl3945_commit_rxon(priv);
Johannes Berg05c914f2008-09-11 00:01:58 +02006806 if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006807 iwl3945_add_station(priv,
Zhu Yi556f8db2007-09-27 11:27:33 +08006808 priv->active_rxon.bssid_addr, 1, 0);
Zhu Yib481de92007-09-25 17:54:57 -07006809 }
6810
6811 } else {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006812 iwl3945_scan_cancel_timeout(priv, 100);
Zhu Yib481de92007-09-25 17:54:57 -07006813 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006814 iwl3945_commit_rxon(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006815 }
6816
Mohamed Abbasfde35712007-11-29 11:10:15 +08006817 done:
Zhu Yib481de92007-09-25 17:54:57 -07006818 IWL_DEBUG_MAC80211("leave\n");
6819 mutex_unlock(&priv->mutex);
6820
6821 return 0;
6822}
6823
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006824static void iwl3945_configure_filter(struct ieee80211_hw *hw,
Johannes Berg4150c572007-09-17 01:29:23 -04006825 unsigned int changed_flags,
6826 unsigned int *total_flags,
6827 int mc_count, struct dev_addr_list *mc_list)
6828{
Abhijeet Kolekar5ec03972008-05-05 10:22:48 +08006829 struct iwl3945_priv *priv = hw->priv;
Zhu, Yi352bc8d2008-11-12 13:14:09 -08006830 __le32 *filter_flags = &priv->staging_rxon.filter_flags;
Rick Farrington25b3f572008-06-30 17:23:28 +08006831
Zhu, Yi352bc8d2008-11-12 13:14:09 -08006832 IWL_DEBUG_MAC80211("Enter: changed: 0x%x, total: 0x%x\n",
6833 changed_flags, *total_flags);
6834
6835 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
6836 if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
6837 *filter_flags |= RXON_FILTER_PROMISC_MSK;
6838 else
6839 *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
Abhijeet Kolekar5ec03972008-05-05 10:22:48 +08006840 }
Zhu, Yi352bc8d2008-11-12 13:14:09 -08006841 if (changed_flags & FIF_ALLMULTI) {
6842 if (*total_flags & FIF_ALLMULTI)
6843 *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
6844 else
6845 *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
6846 }
6847 if (changed_flags & FIF_CONTROL) {
6848 if (*total_flags & FIF_CONTROL)
6849 *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
6850 else
6851 *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
6852 }
6853 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
6854 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
6855 *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
6856 else
6857 *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
6858 }
6859
6860	/* We avoid calling iwl_commit_rxon here to commit the new filter
6861	 * flags, since mac80211 will call ieee80211_hw_config immediately
6862	 * afterwards. (mc_list is not supported at this time.) Otherwise we
6863	 * would need to queue a background iwl_commit_rxon work item.
6864	 */
6865
6866 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
Rick Farrington25b3f572008-06-30 17:23:28 +08006867 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
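	/* Note: only the filter bits this callback actually maps onto RXON
	 * filter flags are reported back to mac80211; everything else is
	 * cleared from *total_flags. */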
Johannes Berg4150c572007-09-17 01:29:23 -04006868}
6869
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006870static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
Zhu Yib481de92007-09-25 17:54:57 -07006871 struct ieee80211_if_init_conf *conf)
6872{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006873 struct iwl3945_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07006874
6875 IWL_DEBUG_MAC80211("enter\n");
6876
6877 mutex_lock(&priv->mutex);
Mohamed Abbas6ef89d02007-10-25 17:15:47 +08006878
Mohamed Abbasfde35712007-11-29 11:10:15 +08006879 if (iwl3945_is_ready_rf(priv)) {
6880 iwl3945_scan_cancel_timeout(priv, 100);
Mohamed Abbasfde35712007-11-29 11:10:15 +08006881 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6882 iwl3945_commit_rxon(priv);
6883 }
Johannes Berg32bfd352007-12-19 01:31:26 +01006884 if (priv->vif == conf->vif) {
6885 priv->vif = NULL;
Zhu Yib481de92007-09-25 17:54:57 -07006886 memset(priv->bssid, 0, ETH_ALEN);
Zhu Yib481de92007-09-25 17:54:57 -07006887 }
6888 mutex_unlock(&priv->mutex);
6889
6890 IWL_DEBUG_MAC80211("leave\n");
Zhu Yib481de92007-09-25 17:54:57 -07006891}
6892
Abhijeet Kolekarcd56d332008-09-03 11:26:21 +08006893#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
6894
6895static void iwl3945_bss_info_changed(struct ieee80211_hw *hw,
6896 struct ieee80211_vif *vif,
6897 struct ieee80211_bss_conf *bss_conf,
6898 u32 changes)
6899{
6900 struct iwl3945_priv *priv = hw->priv;
6901
6902 IWL_DEBUG_MAC80211("changes = 0x%X\n", changes);
6903
6904 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
6905 IWL_DEBUG_MAC80211("ERP_PREAMBLE %d\n",
6906 bss_conf->use_short_preamble);
6907 if (bss_conf->use_short_preamble)
6908 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
6909 else
6910 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
6911 }
6912
6913 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
6914 IWL_DEBUG_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
6915 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
6916 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
6917 else
6918 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
6919 }
6920
6921 if (changes & BSS_CHANGED_ASSOC) {
6922 IWL_DEBUG_MAC80211("ASSOC %d\n", bss_conf->assoc);
6923 /* This should never happen as this function should
6924 * never be called from interrupt context. */
6925 if (WARN_ON_ONCE(in_interrupt()))
6926 return;
6927 if (bss_conf->assoc) {
6928 priv->assoc_id = bss_conf->aid;
6929 priv->beacon_int = bss_conf->beacon_int;
Tomas Winkler28afaf92008-12-19 10:37:06 +08006930 priv->timestamp = bss_conf->timestamp;
Abhijeet Kolekarcd56d332008-09-03 11:26:21 +08006931 priv->assoc_capability = bss_conf->assoc_capability;
6932 priv->next_scan_jiffies = jiffies +
6933 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
6934 mutex_lock(&priv->mutex);
6935 iwl3945_post_associate(priv);
6936 mutex_unlock(&priv->mutex);
6937 } else {
6938 priv->assoc_id = 0;
6939 IWL_DEBUG_MAC80211("DISASSOC %d\n", bss_conf->assoc);
6940 }
6941 } else if (changes && iwl3945_is_associated(priv) && priv->assoc_id) {
6942 IWL_DEBUG_MAC80211("Associated Changes %d\n", changes);
6943 iwl3945_send_rxon_assoc(priv);
6944 }
6945
6946}
6947
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006948static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
Zhu Yib481de92007-09-25 17:54:57 -07006949{
6950 int rc = 0;
6951 unsigned long flags;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006952 struct iwl3945_priv *priv = hw->priv;
John W. Linville9387b7c2008-09-30 20:59:05 -04006953 DECLARE_SSID_BUF(ssid_buf);
Zhu Yib481de92007-09-25 17:54:57 -07006954
6955 IWL_DEBUG_MAC80211("enter\n");
6956
Mohamed Abbas15e869d2007-10-25 17:15:46 +08006957 mutex_lock(&priv->mutex);
Zhu Yib481de92007-09-25 17:54:57 -07006958 spin_lock_irqsave(&priv->lock, flags);
6959
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006960 if (!iwl3945_is_ready_rf(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07006961 rc = -EIO;
6962 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
6963 goto out_unlock;
6964 }
6965
Mohamed Abbas7878a5a2007-11-29 11:10:13 +08006966	/* we don't schedule a scan within the next_scan_jiffies period */
6967 if (priv->next_scan_jiffies &&
6968 time_after(priv->next_scan_jiffies, jiffies)) {
6969 rc = -EAGAIN;
6970 goto out_unlock;
6971 }
Bill Moss15dbf1b2008-05-06 11:05:15 +08006972	/* if we just finished a scan, ask for a delay before the next broadcast scan */
6973 if ((len == 0) && priv->last_scan_jiffies &&
6974 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN,
6975 jiffies)) {
Zhu Yib481de92007-09-25 17:54:57 -07006976 rc = -EAGAIN;
6977 goto out_unlock;
6978 }
6979 if (len) {
Mohamed Abbas7878a5a2007-11-29 11:10:13 +08006980	IWL_DEBUG_SCAN("direct scan for %s [%d]\n",
John W. Linville9387b7c2008-09-30 20:59:05 -04006981 print_ssid(ssid_buf, ssid, len), (int)len);
Zhu Yib481de92007-09-25 17:54:57 -07006982
6983 priv->one_direct_scan = 1;
6984 priv->direct_ssid_len = (u8)
6985 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
6986 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
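		/* direct_ssid_len was clamped to IW_ESSID_MAX_SIZE (32) above,
		 * so at most 32 bytes are copied here. */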
Mohamed Abbas6ef89d02007-10-25 17:15:47 +08006987 } else
6988 priv->one_direct_scan = 0;
Zhu Yib481de92007-09-25 17:54:57 -07006989
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08006990 rc = iwl3945_scan_initiate(priv);
Zhu Yib481de92007-09-25 17:54:57 -07006991
6992 IWL_DEBUG_MAC80211("leave\n");
6993
6994out_unlock:
6995 spin_unlock_irqrestore(&priv->lock, flags);
Mohamed Abbas15e869d2007-10-25 17:15:46 +08006996 mutex_unlock(&priv->mutex);
Zhu Yib481de92007-09-25 17:54:57 -07006997
6998 return rc;
6999}
7000
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007001static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
Zhu Yib481de92007-09-25 17:54:57 -07007002 const u8 *local_addr, const u8 *addr,
7003 struct ieee80211_key_conf *key)
7004{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007005 struct iwl3945_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07007006 int rc = 0;
7007 u8 sta_id;
7008
7009 IWL_DEBUG_MAC80211("enter\n");
7010
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007011 if (!iwl3945_param_hwcrypto) {
Zhu Yib481de92007-09-25 17:54:57 -07007012 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
7013 return -EOPNOTSUPP;
7014 }
7015
7016 if (is_zero_ether_addr(addr))
7017 /* only support pairwise keys */
7018 return -EOPNOTSUPP;
7019
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007020 sta_id = iwl3945_hw_find_station(priv, addr);
Zhu Yib481de92007-09-25 17:54:57 -07007021 if (sta_id == IWL_INVALID_STATION) {
Johannes Berge1749612008-10-27 15:59:26 -07007022 IWL_DEBUG_MAC80211("leave - %pM not in station map.\n",
7023 addr);
Zhu Yib481de92007-09-25 17:54:57 -07007024 return -EINVAL;
7025 }
7026
7027 mutex_lock(&priv->mutex);
7028
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007029 iwl3945_scan_cancel_timeout(priv, 100);
Mohamed Abbas15e869d2007-10-25 17:15:46 +08007030
Zhu Yib481de92007-09-25 17:54:57 -07007031 switch (cmd) {
7032 case SET_KEY:
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007033 rc = iwl3945_update_sta_key_info(priv, key, sta_id);
Zhu Yib481de92007-09-25 17:54:57 -07007034 if (!rc) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007035 iwl3945_set_rxon_hwcrypto(priv, 1);
7036 iwl3945_commit_rxon(priv);
Zhu Yib481de92007-09-25 17:54:57 -07007037 key->hw_key_idx = sta_id;
7038 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n");
7039 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
7040 }
7041 break;
7042 case DISABLE_KEY:
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007043 rc = iwl3945_clear_sta_key_info(priv, sta_id);
Zhu Yib481de92007-09-25 17:54:57 -07007044 if (!rc) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007045 iwl3945_set_rxon_hwcrypto(priv, 0);
7046 iwl3945_commit_rxon(priv);
Zhu Yib481de92007-09-25 17:54:57 -07007047 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
7048 }
7049 break;
7050 default:
7051 rc = -EINVAL;
7052 }
7053
7054 IWL_DEBUG_MAC80211("leave\n");
7055 mutex_unlock(&priv->mutex);
7056
7057 return rc;
7058}
7059
Johannes Berge100bb62008-04-30 18:51:21 +02007060static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
Zhu Yib481de92007-09-25 17:54:57 -07007061 const struct ieee80211_tx_queue_params *params)
7062{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007063 struct iwl3945_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07007064 unsigned long flags;
7065 int q;
Zhu Yib481de92007-09-25 17:54:57 -07007066
7067 IWL_DEBUG_MAC80211("enter\n");
7068
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007069 if (!iwl3945_is_ready_rf(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07007070 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7071 return -EIO;
7072 }
7073
7074 if (queue >= AC_NUM) {
7075 IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue);
7076 return 0;
7077 }
7078
Zhu Yib481de92007-09-25 17:54:57 -07007079 q = AC_NUM - 1 - queue;
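	/* Note: mac80211 numbers its queues from highest priority downward
	 * (queue 0 is voice), while the uCode QoS parameter array appears to
	 * be indexed the other way around, hence the inverted index. */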
7080
7081 spin_lock_irqsave(&priv->lock, flags);
7082
7083 priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
7084 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
7085 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
7086 priv->qos_data.def_qos_parm.ac[q].edca_txop =
Johannes Berg3330d7b2008-02-10 16:49:38 +01007087 cpu_to_le16((params->txop * 32));
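	/* Note: mac80211 hands us the TXOP limit in the 802.11 unit of
	 * 32 usec; multiplying by 32 yields plain usec, which the uCode
	 * appears to expect. */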
Zhu Yib481de92007-09-25 17:54:57 -07007088
7089 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
7090 priv->qos_data.qos_active = 1;
7091
7092 spin_unlock_irqrestore(&priv->lock, flags);
7093
7094 mutex_lock(&priv->mutex);
Johannes Berg05c914f2008-09-11 00:01:58 +02007095 if (priv->iw_mode == NL80211_IFTYPE_AP)
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007096 iwl3945_activate_qos(priv, 1);
7097 else if (priv->assoc_id && iwl3945_is_associated(priv))
7098 iwl3945_activate_qos(priv, 0);
Zhu Yib481de92007-09-25 17:54:57 -07007099
7100 mutex_unlock(&priv->mutex);
7101
Zhu Yib481de92007-09-25 17:54:57 -07007102 IWL_DEBUG_MAC80211("leave\n");
7103 return 0;
7104}
7105
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007106static int iwl3945_mac_get_tx_stats(struct ieee80211_hw *hw,
Zhu Yib481de92007-09-25 17:54:57 -07007107 struct ieee80211_tx_queue_stats *stats)
7108{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007109 struct iwl3945_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07007110 int i, avail;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007111 struct iwl3945_tx_queue *txq;
7112 struct iwl3945_queue *q;
Zhu Yib481de92007-09-25 17:54:57 -07007113 unsigned long flags;
7114
7115 IWL_DEBUG_MAC80211("enter\n");
7116
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007117 if (!iwl3945_is_ready_rf(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07007118 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7119 return -EIO;
7120 }
7121
7122 spin_lock_irqsave(&priv->lock, flags);
7123
7124 for (i = 0; i < AC_NUM; i++) {
7125 txq = &priv->txq[i];
7126 q = &txq->q;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007127 avail = iwl3945_queue_space(q);
Zhu Yib481de92007-09-25 17:54:57 -07007128
Johannes Berg57ffc582008-04-29 17:18:59 +02007129 stats[i].len = q->n_window - avail;
7130 stats[i].limit = q->n_window - q->high_mark;
7131 stats[i].count = q->n_window;
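		/* Note: len is the number of occupied entries (window size
		 * minus free space), limit is the window size minus the
		 * queue's high-water mark, and count is the full window
		 * size. */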
Zhu Yib481de92007-09-25 17:54:57 -07007132
7133 }
7134 spin_unlock_irqrestore(&priv->lock, flags);
7135
7136 IWL_DEBUG_MAC80211("leave\n");
7137
7138 return 0;
7139}
7140
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007141static int iwl3945_mac_get_stats(struct ieee80211_hw *hw,
Zhu Yib481de92007-09-25 17:54:57 -07007142 struct ieee80211_low_level_stats *stats)
7143{
Zhu Yib481de92007-09-25 17:54:57 -07007144 return 0;
7145}
7146
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007147static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
Zhu Yib481de92007-09-25 17:54:57 -07007148{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007149 struct iwl3945_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07007150 unsigned long flags;
7151
7152 mutex_lock(&priv->mutex);
7153 IWL_DEBUG_MAC80211("enter\n");
7154
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007155 iwl3945_reset_qos(priv);
Ron Rindjunsky292ae172008-02-06 11:20:39 -08007156
Zhu Yib481de92007-09-25 17:54:57 -07007157 spin_lock_irqsave(&priv->lock, flags);
7158 priv->assoc_id = 0;
7159 priv->assoc_capability = 0;
7160 priv->call_post_assoc_from_beacon = 0;
7161
7162	/* new association; get rid of the old IBSS beacon skb */
7163 if (priv->ibss_beacon)
7164 dev_kfree_skb(priv->ibss_beacon);
7165
7166 priv->ibss_beacon = NULL;
7167
7168 priv->beacon_int = priv->hw->conf.beacon_int;
Tomas Winkler28afaf92008-12-19 10:37:06 +08007169 priv->timestamp = 0;
Johannes Berg05c914f2008-09-11 00:01:58 +02007170	if (priv->iw_mode == NL80211_IFTYPE_STATION)
Zhu Yib481de92007-09-25 17:54:57 -07007171 priv->beacon_int = 0;
7172
7173 spin_unlock_irqrestore(&priv->lock, flags);
7174
Mohamed Abbasfde35712007-11-29 11:10:15 +08007175 if (!iwl3945_is_ready_rf(priv)) {
7176 IWL_DEBUG_MAC80211("leave - not ready\n");
7177 mutex_unlock(&priv->mutex);
7178 return;
7179 }
7180
Mohamed Abbas15e869d2007-10-25 17:15:46 +08007181	/* we are restarting the association process,
7182	 * so clear the RXON_FILTER_ASSOC_MSK bit
7183	 */
Johannes Berg05c914f2008-09-11 00:01:58 +02007184 if (priv->iw_mode != NL80211_IFTYPE_AP) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007185 iwl3945_scan_cancel_timeout(priv, 100);
Mohamed Abbas15e869d2007-10-25 17:15:46 +08007186 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007187 iwl3945_commit_rxon(priv);
Mohamed Abbas15e869d2007-10-25 17:15:46 +08007188 }
7189
Zhu Yib481de92007-09-25 17:54:57 -07007190 /* Per mac80211.h: This is only used in IBSS mode... */
Johannes Berg05c914f2008-09-11 00:01:58 +02007191 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
Mohamed Abbas15e869d2007-10-25 17:15:46 +08007192
Zhu Yib481de92007-09-25 17:54:57 -07007193 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
7194 mutex_unlock(&priv->mutex);
7195 return;
7196 }
7197
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007198 iwl3945_set_rate(priv);
Zhu Yib481de92007-09-25 17:54:57 -07007199
7200 mutex_unlock(&priv->mutex);
7201
7202 IWL_DEBUG_MAC80211("leave\n");
7203
7204}
7205
Johannes Berge039fa42008-05-15 12:55:29 +02007206static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
Zhu Yib481de92007-09-25 17:54:57 -07007207{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007208 struct iwl3945_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07007209 unsigned long flags;
7210
Zhu Yib481de92007-09-25 17:54:57 -07007211 IWL_DEBUG_MAC80211("enter\n");
7212
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007213 if (!iwl3945_is_ready_rf(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07007214 IWL_DEBUG_MAC80211("leave - RF not ready\n");
Zhu Yib481de92007-09-25 17:54:57 -07007215 return -EIO;
7216 }
7217
Johannes Berg05c914f2008-09-11 00:01:58 +02007218 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
Zhu Yib481de92007-09-25 17:54:57 -07007219 IWL_DEBUG_MAC80211("leave - not IBSS\n");
Zhu Yib481de92007-09-25 17:54:57 -07007220 return -EIO;
7221 }
7222
7223 spin_lock_irqsave(&priv->lock, flags);
7224
7225 if (priv->ibss_beacon)
7226 dev_kfree_skb(priv->ibss_beacon);
7227
7228 priv->ibss_beacon = skb;
7229
7230 priv->assoc_id = 0;
7231
7232 IWL_DEBUG_MAC80211("leave\n");
7233 spin_unlock_irqrestore(&priv->lock, flags);
7234
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007235 iwl3945_reset_qos(priv);
Zhu Yib481de92007-09-25 17:54:57 -07007236
Abhijeet Kolekardc4b1e72008-09-03 11:26:30 +08007237 iwl3945_post_associate(priv);
Zhu Yib481de92007-09-25 17:54:57 -07007238
Zhu Yib481de92007-09-25 17:54:57 -07007239
7240 return 0;
7241}
7242
7243/*****************************************************************************
7244 *
7245 * sysfs attributes
7246 *
7247 *****************************************************************************/
7248
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08007249#ifdef CONFIG_IWL3945_DEBUG
Zhu Yib481de92007-09-25 17:54:57 -07007250
7251/*
7252 * The following adds a new attribute to the sysfs representation
7253 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
7254 * used for controlling the debug level.
7255 *
7256 * See the level definitions in iwl for details.
7257 */
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08007258static ssize_t show_debug_level(struct device *d,
7259 struct device_attribute *attr, char *buf)
Zhu Yib481de92007-09-25 17:54:57 -07007260{
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08007261 struct iwl3945_priv *priv = d->driver_data;
7262
7263 return sprintf(buf, "0x%08X\n", priv->debug_level);
Zhu Yib481de92007-09-25 17:54:57 -07007264}
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08007265static ssize_t store_debug_level(struct device *d,
7266 struct device_attribute *attr,
Zhu Yib481de92007-09-25 17:54:57 -07007267 const char *buf, size_t count)
7268{
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08007269 struct iwl3945_priv *priv = d->driver_data;
7270 unsigned long val;
7271 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07007272
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08007273 ret = strict_strtoul(buf, 0, &val);
7274 if (ret)
Zhu Yib481de92007-09-25 17:54:57 -07007275 printk(KERN_INFO DRV_NAME
7276 ": %s is not in hex or decimal form.\n", buf);
7277 else
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08007278 priv->debug_level = val;
Zhu Yib481de92007-09-25 17:54:57 -07007279
7280 return strnlen(buf, count);
7281}
7282
Samuel Ortiz40b8ec02008-12-19 10:37:08 +08007283static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
7284 show_debug_level, store_debug_level);
Zhu Yib481de92007-09-25 17:54:57 -07007285
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08007286#endif /* CONFIG_IWL3945_DEBUG */
Zhu Yib481de92007-09-25 17:54:57 -07007287
Zhu Yib481de92007-09-25 17:54:57 -07007288static ssize_t show_temperature(struct device *d,
7289 struct device_attribute *attr, char *buf)
7290{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007291 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data;
Zhu Yib481de92007-09-25 17:54:57 -07007292
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007293 if (!iwl3945_is_alive(priv))
Zhu Yib481de92007-09-25 17:54:57 -07007294 return -EAGAIN;
7295
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007296 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
Zhu Yib481de92007-09-25 17:54:57 -07007297}
7298
7299static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
7300
Zhu Yib481de92007-09-25 17:54:57 -07007301static ssize_t show_tx_power(struct device *d,
7302 struct device_attribute *attr, char *buf)
7303{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007304 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data;
Zhu Yib481de92007-09-25 17:54:57 -07007305 return sprintf(buf, "%d\n", priv->user_txpower_limit);
7306}
7307
7308static ssize_t store_tx_power(struct device *d,
7309 struct device_attribute *attr,
7310 const char *buf, size_t count)
7311{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007312 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data;
Zhu Yib481de92007-09-25 17:54:57 -07007313 char *p = (char *)buf;
7314 u32 val;
7315
7316 val = simple_strtoul(p, &p, 10);
7317 if (p == buf)
7318 printk(KERN_INFO DRV_NAME
7319 ": %s is not in decimal form.\n", buf);
7320 else
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007321 iwl3945_hw_reg_set_txpower(priv, val);
Zhu Yib481de92007-09-25 17:54:57 -07007322
7323 return count;
7324}
7325
7326static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
7327
7328static ssize_t show_flags(struct device *d,
7329 struct device_attribute *attr, char *buf)
7330{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007331 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data;
Zhu Yib481de92007-09-25 17:54:57 -07007332
7333 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
7334}
7335
7336static ssize_t store_flags(struct device *d,
7337 struct device_attribute *attr,
7338 const char *buf, size_t count)
7339{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007340 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data;
Zhu Yib481de92007-09-25 17:54:57 -07007341 u32 flags = simple_strtoul(buf, NULL, 0);
7342
7343 mutex_lock(&priv->mutex);
7344 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
7345 /* Cancel any currently running scans... */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007346 if (iwl3945_scan_cancel_timeout(priv, 100))
Zhu Yib481de92007-09-25 17:54:57 -07007347 IWL_WARNING("Could not cancel scan.\n");
7348 else {
7349 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n",
7350 flags);
7351 priv->staging_rxon.flags = cpu_to_le32(flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007352 iwl3945_commit_rxon(priv);
Zhu Yib481de92007-09-25 17:54:57 -07007353 }
7354 }
7355 mutex_unlock(&priv->mutex);
7356
7357 return count;
7358}
7359
7360static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
7361
7362static ssize_t show_filter_flags(struct device *d,
7363 struct device_attribute *attr, char *buf)
7364{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007365 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data;
Zhu Yib481de92007-09-25 17:54:57 -07007366
7367 return sprintf(buf, "0x%04X\n",
7368 le32_to_cpu(priv->active_rxon.filter_flags));
7369}
7370
7371static ssize_t store_filter_flags(struct device *d,
7372 struct device_attribute *attr,
7373 const char *buf, size_t count)
7374{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007375 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data;
Zhu Yib481de92007-09-25 17:54:57 -07007376 u32 filter_flags = simple_strtoul(buf, NULL, 0);
7377
7378 mutex_lock(&priv->mutex);
7379 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
7380 /* Cancel any currently running scans... */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007381 if (iwl3945_scan_cancel_timeout(priv, 100))
Zhu Yib481de92007-09-25 17:54:57 -07007382 IWL_WARNING("Could not cancel scan.\n");
7383 else {
7384 IWL_DEBUG_INFO("Committing rxon.filter_flags = "
7385 "0x%04X\n", filter_flags);
7386 priv->staging_rxon.filter_flags =
7387 cpu_to_le32(filter_flags);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007388 iwl3945_commit_rxon(priv);
Zhu Yib481de92007-09-25 17:54:57 -07007389 }
7390 }
7391 mutex_unlock(&priv->mutex);
7392
7393 return count;
7394}
7395
7396static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
7397 store_filter_flags);
7398
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08007399#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
Zhu Yib481de92007-09-25 17:54:57 -07007400
7401static ssize_t show_measurement(struct device *d,
7402 struct device_attribute *attr, char *buf)
7403{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08007404 struct iwl3945_priv *priv = dev_get_drvdata(d);
Tomas Winkler600c0e12008-12-19 10:37:04 +08007405 struct iwl_spectrum_notification measure_report;
Zhu Yib481de92007-09-25 17:54:57 -07007406 u32 size = sizeof(measure_report), len = 0, ofs = 0;
Tomas Winkler3ac7f142008-07-21 02:40:14 +03007407 u8 *data = (u8 *)&measure_report;
Zhu Yib481de92007-09-25 17:54:57 -07007408 unsigned long flags;
7409
7410 spin_lock_irqsave(&priv->lock, flags);
7411 if (!(priv->measurement_status & MEASUREMENT_READY)) {
7412 spin_unlock_irqrestore(&priv->lock, flags);
7413 return 0;
7414 }
7415 memcpy(&measure_report, &priv->measure_report, size);
7416 priv->measurement_status = 0;
7417 spin_unlock_irqrestore(&priv->lock, flags);
7418
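	/* Note: the snapshot taken above is rendered 16 bytes per line via
	 * hex_dump_to_buffer(), stopping once the PAGE_SIZE sysfs buffer is
	 * full. */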
7419 while (size && (PAGE_SIZE - len)) {
7420 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
7421 PAGE_SIZE - len, 1);
7422 len = strlen(buf);
7423 if (PAGE_SIZE - len)
7424 buf[len++] = '\n';
7425
7426 ofs += 16;
7427 size -= min(size, 16U);
7428 }
7429
7430 return len;
7431}
7432
static ssize_t store_measurement(struct device *d,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct iwl3945_priv *priv = dev_get_drvdata(d);
	struct ieee80211_measurement_params params = {
		.channel = le16_to_cpu(priv->active_rxon.channel),
		.start_time = cpu_to_le64(priv->last_tsf),
		.duration = cpu_to_le16(1),
	};
	u8 type = IWL_MEASURE_BASIC;
	u8 buffer[32];
	u8 channel;

	if (count) {
		char *p = buffer;
		size_t len = min(sizeof(buffer) - 1, count);

		/* copy at most sizeof(buffer) - 1 bytes and make sure the
		 * result is NUL terminated before parsing it */
		memcpy(buffer, buf, len);
		buffer[len] = '\0';
		channel = simple_strtoul(p, NULL, 0);
		if (channel)
			params.channel = channel;

		p = buffer;
		while (*p && *p != ' ')
			p++;
		if (*p)
			type = simple_strtoul(p + 1, NULL, 0);
	}

	IWL_DEBUG_INFO("Invoking measurement of type %d on "
		       "channel %d (for '%s')\n", type, params.channel, buf);
	iwl3945_get_measurement(priv, &params, type);

	return count;
}

static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
		   show_measurement, store_measurement);
#endif /* CONFIG_IWL3945_SPECTRUM_MEASUREMENT */
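
/*
 * Informal note on the hex dump format used by show_measurement() above
 * (and by show_statistics() further down): each hex_dump_to_buffer()
 * call formats one row of at most 16 bytes of the raw report, one byte
 * per group with an ASCII column, and the loop appends rows until either
 * the structure or the PAGE_SIZE sysfs buffer is exhausted.  The output
 * is a raw dump of the firmware notification, not a decoded report.
 */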

static ssize_t store_retry_rate(struct device *d,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct iwl3945_priv *priv = dev_get_drvdata(d);

	priv->retry_rate = simple_strtoul(buf, NULL, 0);
	if (priv->retry_rate <= 0)
		priv->retry_rate = 1;

	return count;
}

static ssize_t show_retry_rate(struct device *d,
			       struct device_attribute *attr, char *buf)
{
	struct iwl3945_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "%d", priv->retry_rate);
}

static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate,
		   store_retry_rate);

static ssize_t store_power_level(struct device *d,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct iwl3945_priv *priv = dev_get_drvdata(d);
	int rc;
	int mode;

	mode = simple_strtoul(buf, NULL, 0);
	mutex_lock(&priv->mutex);

	if (!iwl3945_is_ready(priv)) {
		rc = -EAGAIN;
		goto out;
	}

	if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC))
		mode = IWL_POWER_AC;
	else
		mode |= IWL_POWER_ENABLED;

	if (mode != priv->power_mode) {
		rc = iwl3945_send_power_mode(priv, IWL_POWER_LEVEL(mode));
		if (rc) {
			IWL_DEBUG_MAC80211("failed setting power mode.\n");
			goto out;
		}
		priv->power_mode = mode;
	}

	rc = count;

 out:
	mutex_unlock(&priv->mutex);
	return rc;
}

#define MAX_WX_STRING 80

/* Values are in microseconds */
static const s32 timeout_duration[] = {
	350000,
	250000,
	75000,
	37000,
	25000,
};
static const s32 period_duration[] = {
	400000,
	700000,
	1000000,
	1000000,
	1000000
};

static ssize_t show_power_level(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl3945_priv *priv = dev_get_drvdata(d);
	int level = IWL_POWER_LEVEL(priv->power_mode);
	char *p = buf;

	p += sprintf(p, "%d ", level);
	switch (level) {
	case IWL_POWER_MODE_CAM:
	case IWL_POWER_AC:
		p += sprintf(p, "(AC)");
		break;
	case IWL_POWER_BATTERY:
		p += sprintf(p, "(BATTERY)");
		break;
	default:
		p += sprintf(p,
			     "(Timeout %dms, Period %dms)",
			     timeout_duration[level - 1] / 1000,
			     period_duration[level - 1] / 1000);
	}

	if (!(priv->power_mode & IWL_POWER_ENABLED))
		p += sprintf(p, " OFF\n");
	else
		p += sprintf(p, " \n");

	/* do not count the trailing NUL in the length returned to sysfs */
	return p - buf;
}

static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
		   store_power_level);
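
/*
 * Reading "power_level" reports the current power-saving index followed
 * by a human-readable annotation.  A sketch of what a read might return
 * (the index itself depends on what was last written):
 *
 *	3 (Timeout 75ms, Period 1000ms)
 *
 * i.e. index 3 maps to timeout_duration[2] = 75000 us and
 * period_duration[2] = 1000000 us, while the CAM/AC and battery modes
 * are printed symbolically and " OFF" is appended when power saving is
 * not enabled.
 */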

static ssize_t show_channels(struct device *d,
			     struct device_attribute *attr, char *buf)
{
	/* channel information does not belong in sysfs anyway, so this
	 * attribute intentionally returns nothing */
	return 0;
}

static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);

static ssize_t show_statistics(struct device *d,
			       struct device_attribute *attr, char *buf)
{
	struct iwl3945_priv *priv = dev_get_drvdata(d);
	u32 size = sizeof(struct iwl3945_notif_statistics);
	u32 len = 0, ofs = 0;
	u8 *data = (u8 *)&priv->statistics;
	int rc = 0;

	if (!iwl3945_is_alive(priv))
		return -EAGAIN;

	mutex_lock(&priv->mutex);
	rc = iwl3945_send_statistics_request(priv);
	mutex_unlock(&priv->mutex);

	if (rc) {
		len = sprintf(buf,
			      "Error sending statistics request: 0x%08X\n", rc);
		return len;
	}

	while (size && (PAGE_SIZE - len)) {
		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
				   PAGE_SIZE - len, 1);
		len = strlen(buf);
		if (PAGE_SIZE - len)
			buf[len++] = '\n';

		ofs += 16;
		size -= min(size, 16U);
	}

	return len;
}

static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);

static ssize_t show_antenna(struct device *d,
			    struct device_attribute *attr, char *buf)
{
	struct iwl3945_priv *priv = dev_get_drvdata(d);

	if (!iwl3945_is_alive(priv))
		return -EAGAIN;

	return sprintf(buf, "%d\n", priv->antenna);
}

static ssize_t store_antenna(struct device *d,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int ant;
	struct iwl3945_priv *priv = dev_get_drvdata(d);

	if (count == 0)
		return 0;

	if (sscanf(buf, "%1i", &ant) != 1) {
		IWL_DEBUG_INFO("not in hex or decimal form.\n");
		return count;
	}

	if ((ant >= 0) && (ant <= 2)) {
		IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant);
		priv->antenna = (enum iwl3945_antenna)ant;
	} else
		IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant);

	return count;
}

static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
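
/*
 * Accepted antenna values mirror the "antenna" module parameter defined
 * at the bottom of this file: 0 selects both antennas, 1 forces the main
 * antenna and 2 the auxiliary one.  A hypothetical example, with the
 * path depending on the PCI address of the adapter:
 *
 *	# echo 2 > /sys/bus/pci/drivers/iwl3945/0000:03:00.0/antenna
 */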

static ssize_t show_status(struct device *d,
			   struct device_attribute *attr, char *buf)
{
	struct iwl3945_priv *priv = dev_get_drvdata(d);

	if (!iwl3945_is_alive(priv))
		return -EAGAIN;

	return sprintf(buf, "0x%08x\n", (int)priv->status);
}

static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);

static ssize_t dump_error_log(struct device *d,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct iwl3945_priv *priv = dev_get_drvdata(d);
	char *p = (char *)buf;

	if (p[0] == '1')
		iwl3945_dump_nic_error_log(priv);

	return strnlen(buf, count);
}

static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);

static ssize_t dump_event_log(struct device *d,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct iwl3945_priv *priv = dev_get_drvdata(d);
	char *p = (char *)buf;

	if (p[0] == '1')
		iwl3945_dump_nic_event_log(priv);

	return strnlen(buf, count);
}

static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
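
/*
 * Both dump attributes are write-only triggers: writing the single
 * character '1' asks the driver to dump the uCode error or event log,
 * presumably to the kernel log via the usual IWL_* printk wrappers.
 * Hypothetical usage, path depending on the PCI address of the adapter:
 *
 *	# echo 1 > /sys/bus/pci/drivers/iwl3945/0000:03:00.0/dump_errors
 */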

/*****************************************************************************
 *
 * driver setup and tear down
 *
 *****************************************************************************/

static void iwl3945_setup_deferred_work(struct iwl3945_priv *priv)
{
	priv->workqueue = create_workqueue(DRV_NAME);

	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->up, iwl3945_bg_up);
	INIT_WORK(&priv->restart, iwl3945_bg_restart);
	INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
	INIT_WORK(&priv->scan_completed, iwl3945_bg_scan_completed);
	INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
	INIT_WORK(&priv->abort_scan, iwl3945_bg_abort_scan);
	INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill);
	INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
	INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
	INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
	INIT_DELAYED_WORK(&priv->scan_check, iwl3945_bg_scan_check);

	iwl3945_hw_setup_deferred_work(priv);

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		     iwl3945_irq_tasklet, (unsigned long)priv);
}

static void iwl3945_cancel_deferred_work(struct iwl3945_priv *priv)
{
	iwl3945_hw_cancel_deferred_work(priv);

	cancel_delayed_work_sync(&priv->init_alive_start);
	cancel_delayed_work(&priv->scan_check);
	cancel_delayed_work(&priv->alive_start);
	cancel_work_sync(&priv->beacon_update);
}
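
/*
 * All of the background work above runs on the driver's private
 * workqueue created in iwl3945_setup_deferred_work(); destroying that
 * queue during device removal therefore waits for any handler still in
 * flight.  In the cancel path, init_alive_start and beacon_update are
 * cancelled synchronously, while scan_check and alive_start are only
 * cancelled if they have not started running yet.
 */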

static struct attribute *iwl3945_sysfs_entries[] = {
	&dev_attr_antenna.attr,
	&dev_attr_channels.attr,
	&dev_attr_dump_errors.attr,
	&dev_attr_dump_events.attr,
	&dev_attr_flags.attr,
	&dev_attr_filter_flags.attr,
#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
	&dev_attr_measurement.attr,
#endif
	&dev_attr_power_level.attr,
	&dev_attr_retry_rate.attr,
	&dev_attr_statistics.attr,
	&dev_attr_status.attr,
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWL3945_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};

static struct attribute_group iwl3945_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = iwl3945_sysfs_entries,
};

static struct ieee80211_ops iwl3945_hw_ops = {
	.tx = iwl3945_mac_tx,
	.start = iwl3945_mac_start,
	.stop = iwl3945_mac_stop,
	.add_interface = iwl3945_mac_add_interface,
	.remove_interface = iwl3945_mac_remove_interface,
	.config = iwl3945_mac_config,
	.config_interface = iwl3945_mac_config_interface,
	.configure_filter = iwl3945_configure_filter,
	.set_key = iwl3945_mac_set_key,
	.get_stats = iwl3945_mac_get_stats,
	.get_tx_stats = iwl3945_mac_get_tx_stats,
	.conf_tx = iwl3945_mac_conf_tx,
	.reset_tsf = iwl3945_mac_reset_tsf,
	.bss_info_changed = iwl3945_bss_info_changed,
	.hw_scan = iwl3945_mac_hw_scan
};
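
/*
 * This ops table is deliberately not const: iwl3945_pci_probe() clears
 * .hw_scan at runtime when the disable_hw_scan module parameter is set,
 * so mac80211 falls back to software scanning for every device bound to
 * this driver.
 */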

static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct iwl3945_priv *priv;
	struct ieee80211_hw *hw;
	struct iwl_3945_cfg *cfg = (struct iwl_3945_cfg *)(ent->driver_data);
	unsigned long flags;

	/***********************
	 * 1. Allocating HW data
	 ***********************/

	if ((iwl3945_param_queues_num > IWL39_MAX_NUM_QUEUES) ||
	    (iwl3945_param_queues_num < IWL_MIN_NUM_QUEUES)) {
		IWL_ERROR("invalid queues_num, should be between %d and %d\n",
			  IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES);
		err = -EINVAL;
		goto out;
	}

	/* mac80211 allocates memory for this device instance, including
	 * space for this driver's private structure */
	hw = ieee80211_alloc_hw(sizeof(struct iwl3945_priv), &iwl3945_hw_ops);
	if (hw == NULL) {
		IWL_ERROR("Can not allocate network device\n");
		err = -ENOMEM;
		goto out;
	}

	SET_IEEE80211_DEV(hw, &pdev->dev);

	priv = hw->priv;
	priv->hw = hw;
	priv->pci_dev = pdev;
	priv->cfg = cfg;

	/* Disabling hardware scan means that mac80211 will perform scans
	 * "the hard way", rather than using device's scan. */
	if (iwl3945_param_disable_hw_scan) {
		IWL_DEBUG_INFO("Disabling hw_scan\n");
		iwl3945_hw_ops.hw_scan = NULL;
	}

	IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
	hw->rate_control_algorithm = "iwl-3945-rs";
	hw->sta_data_size = sizeof(struct iwl3945_sta_priv);

	/* Select antenna (may be helpful if only one antenna is connected) */
	priv->antenna = (enum iwl3945_antenna)iwl3945_param_antenna;
#ifdef CONFIG_IWL3945_DEBUG
	priv->debug_level = iwl3945_param_debug;
	atomic_set(&priv->restrict_refcnt, 0);
#endif

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_NOISE_DBM;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->fw_handles_regulatory = true;

	/* 4 EDCA QOS priorities */
	hw->queues = 4;

	/***************************
	 * 2. Initializing PCI bus
	 ***************************/
	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (err) {
		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);
	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/***********************
	 * 3. Read REV Register
	 ***********************/
	priv->hw_base = pci_iomap(pdev, 0, 0);
	if (!priv->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
		       (unsigned long long) pci_resource_len(pdev, 0));
	IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, 0x41, 0x00);

	/* nic init */
	iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS,
			CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	err = iwl3945_poll_direct_bit(priv, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (err < 0) {
		IWL_DEBUG_INFO("Failed to init the card\n");
		/* sysfs group is not registered yet; unwind from the iomap */
		goto out_iounmap;
	}

	/***********************
	 * 4. Read EEPROM
	 ***********************/
	/* Read the EEPROM */
	err = iwl3945_eeprom_init(priv);
	if (err) {
		IWL_ERROR("Unable to init EEPROM\n");
		goto out_iounmap;
	}
	/* MAC Address location in EEPROM same for 3945/4965 */
	get_eeprom_mac(priv, priv->mac_addr);
	IWL_DEBUG_INFO("MAC address: %pM\n", priv->mac_addr);
	SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);

	/***********************
	 * 5. Setup HW Constants
	 ***********************/
	/* Device-specific setup */
	if (iwl3945_hw_set_hw_setting(priv)) {
		IWL_ERROR("failed to set hw settings\n");
		goto out_iounmap;
	}

	/***********************
	 * 6. Setup priv
	 ***********************/
	priv->retry_rate = 1;
	priv->ibss_beacon = NULL;

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->power_data.lock);
	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);

	INIT_LIST_HEAD(&priv->free_frames);
	mutex_init(&priv->mutex);

	/* Clear the driver's (not device's) station table */
	iwl3945_clear_stations_table(priv);

	priv->data_retry_limit = -1;
	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = NL80211_IFTYPE_STATION;

	iwl3945_reset_qos(priv);

	priv->qos_data.qos_active = 0;
	priv->qos_data.qos_cap.val = 0;

	priv->rates_mask = IWL_RATES_MASK;
	/* If power management is turned on, default to AC mode */
	priv->power_mode = IWL_POWER_AC;
	priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;

	err = iwl3945_init_channel_map(priv);
	if (err) {
		IWL_ERROR("initializing regulatory failed: %d\n", err);
		goto out_release_irq;
	}

	err = iwl3945_init_geos(priv);
	if (err) {
		IWL_ERROR("initializing geos failed: %d\n", err);
		goto out_free_channel_map;
	}

	printk(KERN_INFO DRV_NAME
		": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name);

	/***********************************
	 * 7. Initialize Module Parameters
	 ***********************************/

	/* Initialize module parameter values here */
	/* Disable radio (SW RF KILL) via parameter when loading driver */
	if (iwl3945_param_disable) {
		set_bit(STATUS_RF_KILL_SW, &priv->status);
		IWL_DEBUG_INFO("Radio disabled.\n");
	}

	/***********************
	 * 8. Setup Services
	 ***********************/

	spin_lock_irqsave(&priv->lock, flags);
	iwl3945_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
	if (err) {
		IWL_ERROR("failed to create sysfs device attributes\n");
		goto out_free_geos;
	}

	iwl3945_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
	iwl3945_setup_deferred_work(priv);
	iwl3945_setup_rx_handlers(priv);

	/***********************
	 * 9. Conclude
	 ***********************/
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/*********************************
	 * 10. Setup and Register mac80211
	 *********************************/

	err = ieee80211_register_hw(priv->hw);
	if (err) {
		IWL_ERROR("Failed to register network device (error %d)\n", err);
		goto out_remove_sysfs;
	}

	priv->hw->conf.beacon_int = 100;
	priv->mac80211_registered = 1;

	err = iwl3945_rfkill_init(priv);
	if (err)
		IWL_ERROR("Unable to initialize RFKILL system. "
			  "Ignoring error: %d\n", err);

	return 0;

 out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
 out_free_geos:
	iwl3945_free_geos(priv);
 out_free_channel_map:
	iwl3945_free_channel_map(priv);

 out_release_irq:
	/* the workqueue only exists if we got as far as step 8 */
	if (priv->workqueue) {
		destroy_workqueue(priv->workqueue);
		priv->workqueue = NULL;
	}
	iwl3945_unset_hw_setting(priv);

 out_iounmap:
	pci_iounmap(pdev, priv->hw_base);
 out_pci_release_regions:
	pci_release_regions(pdev);
 out_pci_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
 out_ieee80211_free_hw:
	ieee80211_free_hw(priv->hw);
 out:
	return err;
}
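
/*
 * Probe error handling above unwinds in reverse order of the numbered
 * setup steps: sysfs group, geos tables, channel map, workqueue and HW
 * settings, then the PCI iomap, regions and device, and finally the
 * ieee80211_hw allocation.  Each intermediate failure jumps to the label
 * matching the last resource that was successfully acquired.
 */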

static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
{
	struct iwl3945_priv *priv = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!priv)
		return;

	IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");

	set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl3945_down(priv);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&priv->lock, flags);
	iwl3945_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_synchronize_irq(priv);

	sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);

	iwl3945_rfkill_unregister(priv);
	iwl3945_dealloc_ucode_pci(priv);

	if (priv->rxq.bd)
		iwl3945_rx_queue_free(priv, &priv->rxq);
	iwl3945_hw_txq_ctx_free(priv);

	iwl3945_unset_hw_setting(priv);
	iwl3945_clear_stations_table(priv);

	if (priv->mac80211_registered)
		ieee80211_unregister_hw(priv->hw);

	/*netif_stop_queue(dev); */
	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;

	pci_iounmap(pdev, priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	iwl3945_free_channel_map(priv);
	iwl3945_free_geos(priv);
	kfree(priv->scan);
	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);

	ieee80211_free_hw(priv->hw);
}

#ifdef CONFIG_PM

static int iwl3945_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct iwl3945_priv *priv = pci_get_drvdata(pdev);

	if (priv->is_open) {
		set_bit(STATUS_IN_SUSPEND, &priv->status);
		iwl3945_mac_stop(priv->hw);
		priv->is_open = 1;
	}

	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int iwl3945_pci_resume(struct pci_dev *pdev)
{
	struct iwl3945_priv *priv = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);

	if (priv->is_open)
		iwl3945_mac_start(priv->hw);

	clear_bit(STATUS_IN_SUSPEND, &priv->status);
	return 0;
}

#endif /* CONFIG_PM */
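
/*
 * Note on the suspend path: iwl3945_mac_stop() clears priv->is_open, so
 * suspend immediately sets it back to 1 to remember that the interface
 * was up.  Resume then restarts the device only for interfaces that were
 * actually open when the system went to sleep; STATUS_IN_SUSPEND is left
 * set across the sleep, presumably so other parts of the driver can tell
 * a suspend-initiated stop/start apart from a normal ifdown/ifup.
 */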
8161
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08008162/*************** RFKILL FUNCTIONS **********/
Adel Gadllah80fcc9e2008-07-01 17:49:50 +02008163#ifdef CONFIG_IWL3945_RFKILL
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08008164/* software rf-kill from user */
8165static int iwl3945_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
8166{
8167 struct iwl3945_priv *priv = data;
8168 int err = 0;
8169
Adel Gadllah80fcc9e2008-07-01 17:49:50 +02008170 if (!priv->rfkill)
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08008171 return 0;
8172
8173 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
8174 return 0;
8175
Tomas Winklera96a27f2008-10-23 23:48:56 -07008176 IWL_DEBUG_RF_KILL("we received soft RFKILL set to state %d\n", state);
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08008177 mutex_lock(&priv->mutex);
8178
8179 switch (state) {
Zhu Yiacdfe9b2008-06-30 17:23:32 +08008180 case RFKILL_STATE_UNBLOCKED:
Adel Gadllah80fcc9e2008-07-01 17:49:50 +02008181 if (iwl3945_is_rfkill_hw(priv)) {
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08008182 err = -EBUSY;
Adel Gadllah80fcc9e2008-07-01 17:49:50 +02008183 goto out_unlock;
8184 }
8185 iwl3945_radio_kill_sw(priv, 0);
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08008186 break;
Zhu Yiacdfe9b2008-06-30 17:23:32 +08008187 case RFKILL_STATE_SOFT_BLOCKED:
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08008188 iwl3945_radio_kill_sw(priv, 1);
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08008189 break;
Zhu Yiacdfe9b2008-06-30 17:23:32 +08008190 default:
Tomas Winklera96a27f2008-10-23 23:48:56 -07008191 IWL_WARNING("we received unexpected RFKILL state %d\n", state);
Zhu Yiacdfe9b2008-06-30 17:23:32 +08008192 break;
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08008193 }
Adel Gadllah80fcc9e2008-07-01 17:49:50 +02008194out_unlock:
Abhijeet Kolekarebef2002008-06-30 17:23:18 +08008195 mutex_unlock(&priv->mutex);
8196
8197 return err;
8198}
8199
int iwl3945_rfkill_init(struct iwl3945_priv *priv)
{
	struct device *device = wiphy_dev(priv->hw->wiphy);
	int ret = 0;

	BUG_ON(device == NULL);

	IWL_DEBUG_RF_KILL("Initializing RFKILL.\n");
	priv->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN);
	if (!priv->rfkill) {
		IWL_ERROR("Unable to allocate rfkill device.\n");
		ret = -ENOMEM;
		goto error;
	}

	priv->rfkill->name = priv->cfg->name;
	priv->rfkill->data = priv;
	priv->rfkill->state = RFKILL_STATE_UNBLOCKED;
	priv->rfkill->toggle_radio = iwl3945_rfkill_soft_rf_kill;
	priv->rfkill->user_claim_unsupported = 1;

	priv->rfkill->dev.class->suspend = NULL;
	priv->rfkill->dev.class->resume = NULL;

	ret = rfkill_register(priv->rfkill);
	if (ret) {
		IWL_ERROR("Unable to register rfkill: %d\n", ret);
		goto freed_rfkill;
	}

	IWL_DEBUG_RF_KILL("RFKILL initialization complete.\n");
	return ret;

freed_rfkill:
	if (priv->rfkill != NULL)
		rfkill_free(priv->rfkill);
	priv->rfkill = NULL;

error:
	IWL_DEBUG_RF_KILL("RFKILL initialization failed: %d\n", ret);
	return ret;
}

void iwl3945_rfkill_unregister(struct iwl3945_priv *priv)
{
	if (priv->rfkill)
		rfkill_unregister(priv->rfkill);

	priv->rfkill = NULL;
}

/* set rf-kill to the right state. */
void iwl3945_rfkill_set_hw_state(struct iwl3945_priv *priv)
{
	if (!priv->rfkill)
		return;

	if (iwl3945_is_rfkill_hw(priv)) {
		rfkill_force_state(priv->rfkill, RFKILL_STATE_HARD_BLOCKED);
		return;
	}

	if (!iwl3945_is_rfkill_sw(priv))
		rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED);
	else
		rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED);
}
#endif /* CONFIG_IWL3945_RFKILL */
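
/*
 * The mapping implemented by iwl3945_rfkill_set_hw_state() is: hardware
 * kill switch asserted -> RFKILL_STATE_HARD_BLOCKED (wins over everything
 * else), software kill requested -> RFKILL_STATE_SOFT_BLOCKED, otherwise
 * RFKILL_STATE_UNBLOCKED.  Conversely, user requests arriving through
 * iwl3945_rfkill_soft_rf_kill() can never override a hardware kill:
 * trying to unblock while the HW switch is set fails with -EBUSY.
 */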

/*****************************************************************************
 *
 * driver and module entry point
 *
 *****************************************************************************/

static struct pci_driver iwl3945_driver = {
	.name = DRV_NAME,
	.id_table = iwl3945_hw_card_ids,
	.probe = iwl3945_pci_probe,
	.remove = __devexit_p(iwl3945_pci_remove),
#ifdef CONFIG_PM
	.suspend = iwl3945_pci_suspend,
	.resume = iwl3945_pci_resume,
#endif
};

static int __init iwl3945_init(void)
{
	int ret;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	ret = iwl3945_rate_control_register();
	if (ret) {
		IWL_ERROR("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = pci_register_driver(&iwl3945_driver);
	if (ret) {
		IWL_ERROR("Unable to initialize PCI module\n");
		goto error_register;
	}

	return ret;

error_register:
	iwl3945_rate_control_unregister();
	return ret;
}

static void __exit iwl3945_exit(void)
{
	pci_unregister_driver(&iwl3945_driver);
	iwl3945_rate_control_unregister();
}

MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
module_param_named(antenna, iwl3945_param_antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(disable, iwl3945_param_disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
module_param_named(hwcrypto, iwl3945_param_hwcrypto, int, 0444);
MODULE_PARM_DESC(hwcrypto,
		 "use the hardware crypto engine (default 0 [software])");
module_param_named(debug, iwl3945_param_debug, uint, 0444);
MODULE_PARM_DESC(debug, "debug output mask");
module_param_named(disable_hw_scan, iwl3945_param_disable_hw_scan, int, 0444);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");

module_param_named(queues_num, iwl3945_param_queues_num, int, 0444);
MODULE_PARM_DESC(queues_num, "number of hw queues.");

module_exit(iwl3945_exit);
module_init(iwl3945_init);
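
/*
 * Example module load using the parameters defined above (values are
 * illustrative only):
 *
 *	modprobe iwl3945 antenna=1 disable_hw_scan=1 debug=0x1
 *
 * All parameters use mode 0444, i.e. they are set at load time and are
 * only readable afterwards under /sys/module/iwl3945/parameters/.
 */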