/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-calib.h"

/* module parameters */
static struct iwl_mod_params iwl4965_mod_params = {
	.num_of_queues = IWL49_NUM_QUEUES,
	.enable_qos = 1,
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};

#ifdef CONFIG_IWL4965_HT

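/* Map each traffic ID (TID) to the Tx DMA FIFO (access category) it uses;
 * TIDs 8-15 get no dedicated FIFO, and the final entry covers TID 16. */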
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};

#endif /* CONFIG_IWL4965_HT */

/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = iwl_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERROR("BSM uCode verification failed at "
				  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				  BSM_SRAM_LOWER_BOUND,
				  reg - BSM_SRAM_LOWER_BOUND, len,
				  val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}

/**
 * iwl4965_load_bsm - Load bootstrap instructions
 *
 * BSM operation:
 *
 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
 * in special SRAM that does not power down during RFKILL.  When powering back
 * up after power-saving sleeps (or during initial uCode load), the BSM loads
 * the bootstrap program into the on-board processor, and starts it.
 *
 * The bootstrap program loads (via DMA) instructions and data for a new
 * program from host DRAM locations indicated by the host driver in the
 * BSM_DRAM_* registers.  Once the new program is loaded, it starts
 * automatically.
 *
 * When initializing the NIC, the host driver points the BSM to the
 * "initialize" uCode image.  This uCode sets up some internal data, then
 * notifies host via "initialize alive" that it is complete.
 *
 * The host then replaces the BSM_DRAM_* pointer values to point to the
 * normal runtime uCode instructions and a backup uCode data cache buffer
 * (filled initially with starting data values for the on-board processor),
 * then triggers the "initialize" uCode to load and launch the runtime uCode,
 * which begins normal operation.
 *
 * When doing a power-save shutdown, runtime uCode saves data SRAM into
 * the backup data cache in DRAM before SRAM is powered down.
 *
 * When powering back up, the BSM loads the bootstrap program.  This reloads
 * the runtime uCode instructions and the backup data cache into SRAM,
 * and re-launches the runtime uCode from where it left off.
 */
static int iwl4965_load_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int i;
	u32 done;
	u32 reg_offset;
	int ret;

	IWL_DEBUG_INFO("Begin load bsm\n");

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IWL_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
	 * NOTE:  iwl_init_alive_start() will replace these values,
	 *        after the "initialize" uCode has run, to point to
	 *        runtime/protocol instructions and backup data cache.
	 */
	pinst = priv->ucode_init.p_addr >> 4;
	pdata = priv->ucode_init_data.p_addr >> 4;
	inst_len = priv->ucode_init.len;
	data_len = priv->ucode_init_data.len;

	ret = iwl_grab_nic_access(priv);
	if (ret)
		return ret;

	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));

	ret = iwl4965_verify_bsm(priv);
	if (ret) {
		iwl_release_nic_access(priv);
		return ret;
	}

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
	iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 * to prepare to load "initialize" uCode */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish */
	for (i = 0; i < 100; i++) {
		done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
	else {
		IWL_ERROR("BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 * (e.g. when powering back up after power-save shutdown) */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);

	iwl_release_nic_access(priv);

	priv->ucode_type = UCODE_INIT;

	return 0;
}

/**
 * iwl4965_set_ucode_ptrs - Set uCode address location
 *
 * Tell initialization uCode where to find runtime uCode.
 *
 * BSM registers initially contain pointers to initialization uCode.
 * We need to replace them to load runtime uCode inst and data,
 * and to save runtime data when powering down.
 */
static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
{
	dma_addr_t pinst;
	dma_addr_t pdata;
	unsigned long flags;
	int ret = 0;

	/* bits 35:4 for 4965 */
	pinst = priv->ucode_code.p_addr >> 4;
	pdata = priv->ucode_data_backup.p_addr >> 4;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* Tell bootstrap uCode where to find image to load */
	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
		       priv->ucode_data.len);

	/* Inst bytecount must be last to set up, bit 31 signals uCode
	 * that all new ptr/size info is in place */
	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
		       priv->ucode_code.len | BSM_DRAM_INST_LOAD);
	iwl_release_nic_access(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");

	priv->ucode_type = UCODE_RT;

	return ret;
}

/**
 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
 *
 * Called after REPLY_ALIVE notification received from "initialize" uCode.
 *
 * The 4965 "initialize" ALIVE reply contains calibration data for:
 *   Voltage, temperature, and MIMO tx gain correction, now stored in priv
 *   (3945 does not contain this data).
 *
 * Tell "initialize" uCode to go ahead and load the runtime uCode.
 */
static void iwl4965_init_alive_start(struct iwl_priv *priv)
{
	/* Check alive response for "valid" sign from uCode */
	if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO("Initialize Alive failed.\n");
		goto restart;
	}

	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded.  */
	if (iwl_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	/* Calculate temperature */
	priv->temperature = iwl4965_get_temperature(priv);

	/* Send pointers to protocol/runtime uCode image ... init code will
	 * load and launch runtime uCode, which will send us another "Alive"
	 * notification. */
	IWL_DEBUG_INFO("Initialization Alive received.\n");
	if (iwl4965_set_ucode_ptrs(priv)) {
		/* Runtime instruction load won't happen;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
		goto restart;
	}
	return;

restart:
	queue_work(priv->workqueue, &priv->restart);
}

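/* Return non-zero when the RXON flags select a 40 MHz (FAT) channel mode */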
static int is_fat_channel(__le32 rxon_flags)
{
	return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
	       (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
}

int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	/* 4965 HT rate format */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);

		if (idx >= IWL_RATE_MIMO2_6M_PLCP)
			idx = idx - IWL_RATE_MIMO2_6M_PLCP;

		idx += IWL_FIRST_OFDM_RATE;
		/* skip 9M, not supported in HT */
		if (idx >= IWL_RATE_9M_INDEX)
			idx += 1;
		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
			return idx;

	/* 4965 legacy rate format, search for match in table */
	} else {
		for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
			if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx;
	}

	return -1;
}

/**
 * translate ucode response to mac80211 tx status control values
 */
void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
				  struct ieee80211_tx_info *control)
{
	int rate_index;

	control->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		control->flags |= IEEE80211_TX_CTL_OFDM_HT;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		control->flags |= IEEE80211_TX_CTL_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_FAT_MSK)
		control->flags |= IEEE80211_TX_CTL_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		control->flags |= IEEE80211_TX_CTL_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		control->flags |= IEEE80211_TX_CTL_SHORT_GI;
	rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
	if (control->band == IEEE80211_BAND_5GHZ)
		rate_index -= IWL_FIRST_OFDM_RATE;
	control->tx_rate_idx = rate_index;
}

int iwl4965_hw_rxq_stop(struct iwl_priv *priv)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	rc = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				 (1 << 24), 1000);
	if (rc < 0)
		IWL_ERROR("Can't stop Rx DMA.\n");

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

/*
 * EEPROM handlers
 */

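/* Reject EEPROM images whose version or tx-power calibration version is too old */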
static int iwl4965_eeprom_check_version(struct iwl_priv *priv)
{
	u16 eeprom_ver;
	u16 calib_ver;

	eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);

	calib_ver = iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);

	if (eeprom_ver < EEPROM_4965_EEPROM_VERSION ||
	    calib_ver < EEPROM_4965_TX_POWER_VERSION)
		goto err;

	return 0;
err:
	IWL_ERROR("Unsupported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
		  eeprom_ver, EEPROM_4965_EEPROM_VERSION,
		  calib_ver, EEPROM_4965_TX_POWER_VERSION);
	return -EINVAL;
}

int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	if (src == IWL_PWR_SRC_VAUX) {
		u32 val;
		ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
					    &val);

		if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
		}
	} else {
		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	}

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}

/*
 * Activate/deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask.
 * Must be called with priv->lock held and NIC access grabbed.
 */
static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
}

static int iwl4965_apm_init(struct iwl_priv *priv)
{
	int ret = 0;

	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* disable L0S without affecting L1 (don't wait for ICH L0S bug W/A) */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* set "initialization complete" bit to move adapter
	 * D0U* --> D0A* state */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock stabilization */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO("Failed to init the card\n");
		goto out;
	}

	ret = iwl_grab_nic_access(priv);
	if (ret)
		goto out;

	/* enable DMA */
	iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
		       APMG_CLK_VAL_BSM_CLK_RQT);

	udelay(20);

	/* disable L1-Active */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);
out:
	return ret;
}

static void iwl4965_nic_config(struct iwl_priv *priv)
{
	unsigned long flags;
	u32 val;
	u16 radio_cfg;
	u8 val_link;

	spin_lock_irqsave(&priv->lock, flags);

	if ((priv->rev_id & 0x80) == 0x80 && (priv->rev_id & 0x7f) < 8) {
		pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
		/* Enable No Snoop field */
		pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
				       val & ~(1 << 11));
	}

	pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);

	/* L1 is enabled by BIOS */
	if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN)
		/* disable L0S, leave L1A enabled */
		iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		/* enable L0S, disable L1A */
		iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);

	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

	/* write radio config values to register */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			    EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	priv->calib_info = (struct iwl_eeprom_calib_info *)
		iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static int iwl4965_apm_stop_master(struct iwl_priv *priv)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* set stop master bit */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(priv, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		goto out;

out:
	spin_unlock_irqrestore(&priv->lock, flags);
	IWL_DEBUG_INFO("stop master\n");

	return ret;
}

static void iwl4965_apm_stop(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl4965_apm_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static int iwl4965_apm_reset(struct iwl_priv *priv)
{
	int ret = 0;
	unsigned long flags;

	iwl4965_apm_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* FIXME: put here L1A -L0S w/a */

	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	ret = iwl_poll_bit(priv, CSR_RESET,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);

	if (ret)
		goto out;

	udelay(10);

	ret = iwl_grab_nic_access(priv);
	if (ret)
		goto out;
	/* Enable DMA and BSM Clock */
	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
		       APMG_CLK_VAL_BSM_CLK_RQT);

	udelay(10);

	/* disable L1A */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);

	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
	wake_up_interruptible(&priv->wait_command_queue);

out:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}

#define REG_RECALIB_PERIOD (60)

/**
 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
 *
 * This callback is provided in order to send a statistics request.
 *
 * This timer function is continually reset to execute within
 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
 * was received.  We need to ensure we receive the statistics in order
 * to update the temperature used for calibrating the TXPOWER.
 */
static void iwl4965_bg_statistics_periodic(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	iwl_send_statistics_request(priv, CMD_ASYNC);
}

void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
{
	struct iwl4965_ct_kill_config cmd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	spin_unlock_irqrestore(&priv->lock, flags);

	cmd.critical_temperature_R =
		cpu_to_le32(priv->hw_params.ct_kill_threshold);

	ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
			       sizeof(cmd), &cmd);
	if (ret)
		IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
	else
		IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded, "
			       "critical temperature is %d\n",
			       cmd.critical_temperature_R);
}

#ifdef CONFIG_IWL4965_RUN_TIME_CALIB

/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
 * Called after every association, but this runs only once!
 * ... once chain noise is calibrated the first time, it's good forever. */
static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
{
	struct iwl_chain_noise_data *data = &(priv->chain_noise_data);

	if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
		struct iwl4965_calibration_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = 0;
		cmd.diff_gain_b = 0;
		cmd.diff_gain_c = 0;
		if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				     sizeof(cmd), &cmd))
			IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
		IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
	}
}

static void iwl4965_gain_computation(struct iwl_priv *priv,
				     u32 *average_noise,
				     u16 min_average_noise_antenna_i,
				     u32 min_average_noise)
{
	int i, ret;
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;

	data->delta_gain_code[min_average_noise_antenna_i] = 0;

	for (i = 0; i < NUM_RX_CHAINS; i++) {
		s32 delta_g = 0;

		if (!(data->disconn_array[i]) &&
		    (data->delta_gain_code[i] ==
		     CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
			delta_g = average_noise[i] - min_average_noise;
			data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
			data->delta_gain_code[i] =
				min(data->delta_gain_code[i],
				    (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

			data->delta_gain_code[i] =
				(data->delta_gain_code[i] | (1 << 2));
		} else {
			data->delta_gain_code[i] = 0;
		}
	}
	IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
			data->delta_gain_code[0],
			data->delta_gain_code[1],
			data->delta_gain_code[2]);

	/* Differential gain gets sent to uCode only once */
	if (!data->radio_write) {
		struct iwl4965_calibration_cmd cmd;
		data->radio_write = 1;

		memset(&cmd, 0, sizeof(cmd));
		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = data->delta_gain_code[0];
		cmd.diff_gain_b = data->delta_gain_code[1];
		cmd.diff_gain_c = data->delta_gain_code[2];
		ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				       sizeof(cmd), &cmd);
		if (ret)
			IWL_DEBUG_CALIB("fail sending cmd "
					"REPLY_PHY_CALIBRATION_CMD\n");

		/* TODO we might want recalculate
		 * rx_chain in rxon cmd */

		/* Mark so we run this algo only once! */
		data->state = IWL_CHAIN_NOISE_CALIBRATED;
	}
	data->chain_noise_a = 0;
	data->chain_noise_b = 0;
	data->chain_noise_c = 0;
	data->chain_signal_a = 0;
	data->chain_signal_b = 0;
	data->chain_signal_c = 0;
	data->beacon_count = 0;
}

static void iwl4965_bg_sensitivity_work(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
					     sensitivity_work);

	mutex_lock(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status)) {
		mutex_unlock(&priv->mutex);
		return;
	}

	if (priv->start_calib) {
		iwl_chain_noise_calibration(priv, &priv->statistics);

		iwl_sensitivity_calibration(priv, &priv->statistics);
	}

	mutex_unlock(&priv->mutex);
	return;
}
#endif /* CONFIG_IWL4965_RUN_TIME_CALIB */

static void iwl4965_bg_txpower_work(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
					     txpower_work);

	/* If a scan happened to start before we got here
	 * then just return; the statistics notification will
	 * kick off another scheduled work to compensate for
	 * any temperature delta we missed here. */
	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status))
		return;

	mutex_lock(&priv->mutex);

	/* Regardless of whether we are associated, we must reconfigure the
	 * TX power since frames can be sent on non-radar channels while
	 * not associated */
	iwl4965_hw_reg_send_txpower(priv);

	/* Update last_temperature to keep is_calib_needed from running
	 * when it isn't needed... */
	priv->last_temperature = priv->temperature;

	mutex_unlock(&priv->mutex);
}

/*
 * Acquire priv->lock before calling this function !
 */
static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			   (index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
}

/**
 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
 * @scd_retry: (1) Indicates queue will be used in aggregation mode
 *
 * NOTE:  Acquire priv->lock before calling this function !
 */
static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Find out whether to activate Tx queue */
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate */
	iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
		       (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		       (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		       (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		       IWL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}

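/* Default mapping of Tx queue number to Tx DMA FIFO; entry 4 feeds the command FIFO */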
static const u16 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL49_CMD_FIFO_NUM,
	IWL_TX_FIFO_HCCA_1,
	IWL_TX_FIFO_HCCA_2
};

int iwl4965_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	int i = 0;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* Clear 4965's internal Tx Scheduler data base */
	priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
		iwl_write_targ_mem(priv, a, 0);

	/* Tell 4965 where to find Tx byte count tables */
	iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
		       (priv->shared_phys +
			offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);

	/* Disable chain mode for all queues */
	iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write indexes */
		iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				   IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				   (SCD_WIN_SIZE <<
				    IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				   IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				   IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				   sizeof(u32),
				   (SCD_FRAME_LIMIT <<
				    IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				   IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
		       (1 << priv->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));

	iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);

	/* Map each Tx/cmd queue to its corresponding fifo */
	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];
		iwl_txq_ctx_activate(priv, i);
		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}

#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,
};
#endif

/**
 * iwl4965_hw_set_hw_params
 *
 * Called when initializing driver
 */
int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
{

	if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) ||
	    (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
		IWL_ERROR("invalid queues_num, should be between %d and %d\n",
			  IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
		return -EINVAL;
	}

	priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
	priv->hw_params.sw_crypto = priv->cfg->mod_params->sw_crypto;
	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	if (priv->cfg->mod_params->amsdu_size_8K)
		priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
	else
		priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
	priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
	priv->hw_params.max_stations = IWL4965_STATION_COUNT;
	priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;

	priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
	priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
	priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	priv->hw_params.fat_channel = BIT(IEEE80211_BAND_5GHZ);

	priv->hw_params.tx_chains_num = 2;
	priv->hw_params.rx_chains_num = 2;
	priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
	priv->hw_params.valid_rx_ant = ANT_A | ANT_B;
	priv->hw_params.ct_kill_threshold = CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);

#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
	priv->hw_params.sens = &iwl4965_sensitivity;
#endif

	return 0;
}

/* set card power command */
static int iwl4965_set_power(struct iwl_priv *priv,
			     void *cmd)
{
	int ret = 0;

	ret = iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
				     sizeof(struct iwl4965_powertable_cmd),
				     cmd, NULL);
	return ret;
}

int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
{
	IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n");
	return -EINVAL;
}

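/* Sign-preserving division of num by denom, rounded to nearest;
 * the result is stored in *res and the function always returns 1 */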
static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
{
	s32 sign = 1;

	if (num < 0) {
		sign = -sign;
		num = -num;
	}
	if (denom < 0) {
		sign = -sign;
		denom = -denom;
	}
	*res = 1;
	*res = ((num * 2 + denom) / (denom * 2)) * sign;

	return 1;
}

/**
 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
 *
 * Determines power supply voltage compensation for txpower calculations.
 * Returns number of 1/2-dB steps to subtract from gain table index,
 * to compensate for difference between power supply voltage during
 * factory measurements, vs. current power supply voltage.
 *
 * Voltage indication is higher for lower voltage.
 * Lower voltage requires more gain (lower gain table index).
 */
static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
					    s32 current_voltage)
{
	s32 comp = 0;

	if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
	    (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
		return 0;

	iwl4965_math_div_round(current_voltage - eeprom_voltage,
			       TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);

	if (current_voltage > eeprom_voltage)
		comp *= 2;
	if ((comp < -2) || (comp > 2))
		comp = 0;

	return comp;
}

static const struct iwl_channel_info *
iwl4965_get_channel_txpower_info(struct iwl_priv *priv,
				 enum ieee80211_band band, u16 channel)
{
	const struct iwl_channel_info *ch_info;

	ch_info = iwl_get_channel_info(priv, band, channel);

	if (!is_channel_valid(ch_info))
		return NULL;

	return ch_info;
}

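/* Find the tx-attenuation calibration group whose channel range contains 'channel' */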
static s32 iwl4965_get_tx_atten_grp(u16 channel)
{
	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;

	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;

	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;

	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;

	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;

	IWL_ERROR("Can't find txatten group for channel %d.\n", channel);
	return -1;
}

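/* Find the EEPROM calibration sub-band whose channel range contains 'channel' */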
static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
{
	s32 b = -1;

	for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
		if (priv->calib_info->band_info[b].ch_from == 0)
			continue;

		if ((channel >= priv->calib_info->band_info[b].ch_from)
		    && (channel <= priv->calib_info->band_info[b].ch_to))
			break;
	}

	return b;
}

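/* Linearly interpolate the value at x from the two sample points (x1, y1) and (x2, y2) */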
static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
{
	s32 val;

	if (x2 == x1)
		return y1;
	else {
		iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
		return val + y2;
	}
}

/**
 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
 *
 * Interpolates factory measurements from the two sample channels within a
 * sub-band, to apply to channel of interest.  Interpolation is proportional to
 * differences in channel frequencies, which is proportional to differences
 * in channel number.
 */
static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
				    struct iwl_eeprom_calib_ch_info *chan_info)
{
	s32 s = -1;
	u32 c;
	u32 m;
	const struct iwl_eeprom_calib_measure *m1;
	const struct iwl_eeprom_calib_measure *m2;
	struct iwl_eeprom_calib_measure *omeas;
	u32 ch_i1;
	u32 ch_i2;

	s = iwl4965_get_sub_band(priv, channel);
	if (s >= EEPROM_TX_POWER_BANDS) {
		IWL_ERROR("Tx Power can not find channel %d ", channel);
		return -1;
	}

	ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
	ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
	chan_info->ch_num = (u8) channel;

	IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
			  channel, s, ch_i1, ch_i2);

	for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
		for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
			m1 = &(priv->calib_info->band_info[s].ch1.
			       measurements[c][m]);
			m2 = &(priv->calib_info->band_info[s].ch2.
			       measurements[c][m]);
			omeas = &(chan_info->measurements[c][m]);

			omeas->actual_pow =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->actual_pow,
							   ch_i2,
							   m2->actual_pow);
			omeas->gain_idx =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->gain_idx, ch_i2,
							   m2->gain_idx);
			omeas->temperature =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->temperature,
							   ch_i2,
							   m2->temperature);
			omeas->pa_det =
			    (s8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->pa_det, ch_i2,
							   m2->pa_det);

			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
			     m1->actual_pow, m2->actual_pow, omeas->actual_pow);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
			     m1->gain_idx, m2->gain_idx, omeas->gain_idx);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
			     m1->pa_det, m2->pa_det, omeas->pa_det);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
			     m1->temperature, m2->temperature,
			     omeas->temperature);
		}
	}

	return 0;
}

/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
static s32 back_off_table[] = {
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
	10				/* CCK */
};

/* Thermal compensation values for txpower for various frequency ranges ...
 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
static struct iwl4965_txpower_comp_entry {
	s32 degrees_per_05db_a;
	s32 degrees_per_05db_a_denom;
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
	{9, 2},			/* group 0 5.2, ch 34-43 */
	{4, 1},			/* group 1 5.2, ch 44-70 */
	{4, 1},			/* group 2 5.2, ch 71-124 */
	{4, 1},			/* group 3 5.2, ch 125-200 */
	{3, 1}			/* group 4 2.4, ch all */
};

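/* Lowest (minimum-power) gain table index allowed for the given band and rate group */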
static s32 get_min_power_index(s32 rate_power_index, u32 band)
{
	if (!band) {
		if ((rate_power_index & 7) <= 4)
			return MIN_TX_GAIN_INDEX_52GHZ_EXT;
	}
	return MIN_TX_GAIN_INDEX;
}

struct gain_entry {
	u8 dsp;
	u8 radio;
};

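/* DSP attenuation and radio gain settings, ordered from highest to lowest
 * txpower; index 0 is the 5.2 GHz table, index 1 the 2.4 GHz table. */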
1293static const struct gain_entry gain_table[2][108] = {
1294 /* 5.2GHz power gain index table */
1295 {
1296 {123, 0x3F}, /* highest txpower */
1297 {117, 0x3F},
1298 {110, 0x3F},
1299 {104, 0x3F},
1300 {98, 0x3F},
1301 {110, 0x3E},
1302 {104, 0x3E},
1303 {98, 0x3E},
1304 {110, 0x3D},
1305 {104, 0x3D},
1306 {98, 0x3D},
1307 {110, 0x3C},
1308 {104, 0x3C},
1309 {98, 0x3C},
1310 {110, 0x3B},
1311 {104, 0x3B},
1312 {98, 0x3B},
1313 {110, 0x3A},
1314 {104, 0x3A},
1315 {98, 0x3A},
1316 {110, 0x39},
1317 {104, 0x39},
1318 {98, 0x39},
1319 {110, 0x38},
1320 {104, 0x38},
1321 {98, 0x38},
1322 {110, 0x37},
1323 {104, 0x37},
1324 {98, 0x37},
1325 {110, 0x36},
1326 {104, 0x36},
1327 {98, 0x36},
1328 {110, 0x35},
1329 {104, 0x35},
1330 {98, 0x35},
1331 {110, 0x34},
1332 {104, 0x34},
1333 {98, 0x34},
1334 {110, 0x33},
1335 {104, 0x33},
1336 {98, 0x33},
1337 {110, 0x32},
1338 {104, 0x32},
1339 {98, 0x32},
1340 {110, 0x31},
1341 {104, 0x31},
1342 {98, 0x31},
1343 {110, 0x30},
1344 {104, 0x30},
1345 {98, 0x30},
1346 {110, 0x25},
1347 {104, 0x25},
1348 {98, 0x25},
1349 {110, 0x24},
1350 {104, 0x24},
1351 {98, 0x24},
1352 {110, 0x23},
1353 {104, 0x23},
1354 {98, 0x23},
1355 {110, 0x22},
1356 {104, 0x18},
1357 {98, 0x18},
1358 {110, 0x17},
1359 {104, 0x17},
1360 {98, 0x17},
1361 {110, 0x16},
1362 {104, 0x16},
1363 {98, 0x16},
1364 {110, 0x15},
1365 {104, 0x15},
1366 {98, 0x15},
1367 {110, 0x14},
1368 {104, 0x14},
1369 {98, 0x14},
1370 {110, 0x13},
1371 {104, 0x13},
1372 {98, 0x13},
1373 {110, 0x12},
1374 {104, 0x08},
1375 {98, 0x08},
1376 {110, 0x07},
1377 {104, 0x07},
1378 {98, 0x07},
1379 {110, 0x06},
1380 {104, 0x06},
1381 {98, 0x06},
1382 {110, 0x05},
1383 {104, 0x05},
1384 {98, 0x05},
1385 {110, 0x04},
1386 {104, 0x04},
1387 {98, 0x04},
1388 {110, 0x03},
1389 {104, 0x03},
1390 {98, 0x03},
1391 {110, 0x02},
1392 {104, 0x02},
1393 {98, 0x02},
1394 {110, 0x01},
1395 {104, 0x01},
1396 {98, 0x01},
1397 {110, 0x00},
1398 {104, 0x00},
1399 {98, 0x00},
1400 {93, 0x00},
1401 {88, 0x00},
1402 {83, 0x00},
1403 {78, 0x00},
1404 },
1405 /* 2.4GHz power gain index table */
1406 {
1407 {110, 0x3f}, /* highest txpower */
1408 {104, 0x3f},
1409 {98, 0x3f},
1410 {110, 0x3e},
1411 {104, 0x3e},
1412 {98, 0x3e},
1413 {110, 0x3d},
1414 {104, 0x3d},
1415 {98, 0x3d},
1416 {110, 0x3c},
1417 {104, 0x3c},
1418 {98, 0x3c},
1419 {110, 0x3b},
1420 {104, 0x3b},
1421 {98, 0x3b},
1422 {110, 0x3a},
1423 {104, 0x3a},
1424 {98, 0x3a},
1425 {110, 0x39},
1426 {104, 0x39},
1427 {98, 0x39},
1428 {110, 0x38},
1429 {104, 0x38},
1430 {98, 0x38},
1431 {110, 0x37},
1432 {104, 0x37},
1433 {98, 0x37},
1434 {110, 0x36},
1435 {104, 0x36},
1436 {98, 0x36},
1437 {110, 0x35},
1438 {104, 0x35},
1439 {98, 0x35},
1440 {110, 0x34},
1441 {104, 0x34},
1442 {98, 0x34},
1443 {110, 0x33},
1444 {104, 0x33},
1445 {98, 0x33},
1446 {110, 0x32},
1447 {104, 0x32},
1448 {98, 0x32},
1449 {110, 0x31},
1450 {104, 0x31},
1451 {98, 0x31},
1452 {110, 0x30},
1453 {104, 0x30},
1454 {98, 0x30},
1455 {110, 0x6},
1456 {104, 0x6},
1457 {98, 0x6},
1458 {110, 0x5},
1459 {104, 0x5},
1460 {98, 0x5},
1461 {110, 0x4},
1462 {104, 0x4},
1463 {98, 0x4},
1464 {110, 0x3},
1465 {104, 0x3},
1466 {98, 0x3},
1467 {110, 0x2},
1468 {104, 0x2},
1469 {98, 0x2},
1470 {110, 0x1},
1471 {104, 0x1},
1472 {98, 0x1},
1473 {110, 0x0},
1474 {104, 0x0},
1475 {98, 0x0},
1476 {97, 0},
1477 {96, 0},
1478 {95, 0},
1479 {94, 0},
1480 {93, 0},
1481 {92, 0},
1482 {91, 0},
1483 {90, 0},
1484 {89, 0},
1485 {88, 0},
1486 {87, 0},
1487 {86, 0},
1488 {85, 0},
1489 {84, 0},
1490 {83, 0},
1491 {82, 0},
1492 {81, 0},
1493 {80, 0},
1494 {79, 0},
1495 {78, 0},
1496 {77, 0},
1497 {76, 0},
1498 {75, 0},
1499 {74, 0},
1500 {73, 0},
1501 {72, 0},
1502 {71, 0},
1503 {70, 0},
1504 {69, 0},
1505 {68, 0},
1506 {67, 0},
1507 {66, 0},
1508 {65, 0},
1509 {64, 0},
1510 {63, 0},
1511 {62, 0},
1512 {61, 0},
1513 {60, 0},
1514 {59, 0},
1515 }
1516};
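/* Each gain_table row is addressed by the power_index computed in
 * iwl4965_fill_txpower_tbl(): index 0 is the highest txpower and index 107
 * (the clamp value used below) is the lowest.  The .dsp field feeds
 * dsp_predis_atten[] and .radio feeds radio_tx_gain[] in the txpower
 * command for one Tx chain. */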
1517
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001518static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
Zhu Yib481de92007-09-25 17:54:57 -07001519 u8 is_fat, u8 ctrl_chan_high,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001520 struct iwl4965_tx_power_db *tx_power_tbl)
Zhu Yib481de92007-09-25 17:54:57 -07001521{
1522 u8 saturation_power;
1523 s32 target_power;
1524 s32 user_target_power;
1525 s32 power_limit;
1526 s32 current_temp;
1527 s32 reg_limit;
1528 s32 current_regulatory;
1529 s32 txatten_grp = CALIB_CH_GROUP_MAX;
1530 int i;
1531 int c;
Assaf Kraussbf85ea42008-03-14 10:38:49 -07001532 const struct iwl_channel_info *ch_info = NULL;
Tomas Winkler073d3f52008-04-21 15:41:52 -07001533 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
1534 const struct iwl_eeprom_calib_measure *measurement;
Zhu Yib481de92007-09-25 17:54:57 -07001535 s16 voltage;
1536 s32 init_voltage;
1537 s32 voltage_compensation;
1538 s32 degrees_per_05db_num;
1539 s32 degrees_per_05db_denom;
1540 s32 factory_temp;
1541 s32 temperature_comp[2];
1542 s32 factory_gain_index[2];
1543 s32 factory_actual_pwr[2];
1544 s32 power_index;
1545
1546 /* Sanity check requested level (dBm) */
1547 if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) {
1548 IWL_WARNING("Requested user TXPOWER %d below limit.\n",
1549 priv->user_txpower_limit);
1550 return -EINVAL;
1551 }
1552 if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) {
1553 IWL_WARNING("Requested user TXPOWER %d above limit.\n",
1554 priv->user_txpower_limit);
1555 return -EINVAL;
1556 }
1557
1558 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
1559 * are used for indexing into txpower table) */
1560 user_target_power = 2 * priv->user_txpower_limit;
1561
1562 /* Get current (RXON) channel, band, width */
1563 ch_info =
Johannes Berg8318d782008-01-24 19:38:38 +01001564 iwl4965_get_channel_txpower_info(priv, priv->band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07001565
1566 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
1567 is_fat);
1568
1569 if (!ch_info)
1570 return -EINVAL;
1571
1572 /* get txatten group, used to select 1) thermal txpower adjustment
1573 * and 2) mimo txpower balance between Tx chains. */
1574 txatten_grp = iwl4965_get_tx_atten_grp(channel);
1575 if (txatten_grp < 0)
1576 return -EINVAL;
1577
1578 IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n",
1579 channel, txatten_grp);
1580
1581 if (is_fat) {
1582 if (ctrl_chan_high)
1583 channel -= 2;
1584 else
1585 channel += 2;
1586 }
1587
1588 /* hardware txpower limits ...
1589 * saturation (clipping distortion) txpowers are in half-dBm */
1590 if (band)
Tomas Winkler073d3f52008-04-21 15:41:52 -07001591 saturation_power = priv->calib_info->saturation_power24;
Zhu Yib481de92007-09-25 17:54:57 -07001592 else
Tomas Winkler073d3f52008-04-21 15:41:52 -07001593 saturation_power = priv->calib_info->saturation_power52;
Zhu Yib481de92007-09-25 17:54:57 -07001594
1595 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
1596 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
1597 if (band)
1598 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
1599 else
1600 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
1601 }
1602
1603 /* regulatory txpower limits ... reg_limit values are in half-dBm,
1604 * max_power_avg values are in dBm, convert * 2 */
1605 if (is_fat)
1606 reg_limit = ch_info->fat_max_power_avg * 2;
1607 else
1608 reg_limit = ch_info->max_power_avg * 2;
1609
1610 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
1611 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
1612 if (band)
1613 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
1614 else
1615 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
1616 }
1617
1618 /* Interpolate txpower calibration values for this channel,
1619 * based on factory calibration tests on spaced channels. */
1620 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
1621
1622 /* calculate tx gain adjustment based on power supply voltage */
Tomas Winkler073d3f52008-04-21 15:41:52 -07001623 voltage = priv->calib_info->voltage;
Zhu Yib481de92007-09-25 17:54:57 -07001624 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
1625 voltage_compensation =
1626 iwl4965_get_voltage_compensation(voltage, init_voltage);
1627
1628 IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n",
1629 init_voltage,
1630 voltage, voltage_compensation);
1631
1632 /* get current temperature (Celsius) */
1633 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
1634	current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
1635 current_temp = KELVIN_TO_CELSIUS(current_temp);
1636
1637 /* select thermal txpower adjustment params, based on channel group
1638 * (same frequency group used for mimo txatten adjustment) */
1639 degrees_per_05db_num =
1640 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
1641 degrees_per_05db_denom =
1642 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
1643
1644 /* get per-chain txpower values from factory measurements */
1645 for (c = 0; c < 2; c++) {
1646 measurement = &ch_eeprom_info.measurements[c][1];
1647
1648 /* txgain adjustment (in half-dB steps) based on difference
1649 * between factory and current temperature */
1650 factory_temp = measurement->temperature;
1651 iwl4965_math_div_round((current_temp - factory_temp) *
1652 degrees_per_05db_denom,
1653 degrees_per_05db_num,
1654 &temperature_comp[c]);
1655
1656 factory_gain_index[c] = measurement->gain_idx;
1657 factory_actual_pwr[c] = measurement->actual_pow;
1658
1659 IWL_DEBUG_TXPOWER("chain = %d\n", c);
1660 IWL_DEBUG_TXPOWER("fctry tmp %d, "
1661 "curr tmp %d, comp %d steps\n",
1662 factory_temp, current_temp,
1663 temperature_comp[c]);
1664
1665 IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n",
1666 factory_gain_index[c],
1667 factory_actual_pwr[c]);
1668 }
1669
1670 /* for each of 33 bit-rates (including 1 for CCK) */
1671 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
1672 u8 is_mimo_rate;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001673 union iwl4965_tx_power_dual_stream tx_power;
Zhu Yib481de92007-09-25 17:54:57 -07001674
1675 /* for mimo, reduce each chain's txpower by half
1676 * (3dB, 6 steps), so total output power is regulatory
1677 * compliant. */
1678 if (i & 0x8) {
1679 current_regulatory = reg_limit -
1680 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1681 is_mimo_rate = 1;
1682 } else {
1683 current_regulatory = reg_limit;
1684 is_mimo_rate = 0;
1685 }
1686
1687 /* find txpower limit, either hardware or regulatory */
1688 power_limit = saturation_power - back_off_table[i];
1689 if (power_limit > current_regulatory)
1690 power_limit = current_regulatory;
1691
1692 /* reduce user's txpower request if necessary
1693 * for this rate on this channel */
1694 target_power = user_target_power;
1695 if (target_power > power_limit)
1696 target_power = power_limit;
1697
1698 IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n",
1699 i, saturation_power - back_off_table[i],
1700 current_regulatory, user_target_power,
1701 target_power);
1702
1703 /* for each of 2 Tx chains (radio transmitters) */
1704 for (c = 0; c < 2; c++) {
1705 s32 atten_value;
1706
1707 if (is_mimo_rate)
1708 atten_value =
1709 (s32)le32_to_cpu(priv->card_alive_init.
1710 tx_atten[txatten_grp][c]);
1711 else
1712 atten_value = 0;
1713
1714 /* calculate index; higher index means lower txpower */
1715 power_index = (u8) (factory_gain_index[c] -
1716 (target_power -
1717 factory_actual_pwr[c]) -
1718 temperature_comp[c] -
1719 voltage_compensation +
1720 atten_value);
1721
1722/* IWL_DEBUG_TXPOWER("calculated txpower index %d\n",
1723 power_index); */
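			/* Illustrative arithmetic (hypothetical values): with
			 * factory_gain_index 80, target_power 30 (15 dBm),
			 * factory_actual_pwr 26 (13 dBm), temperature_comp 2,
			 * voltage_compensation 0 and atten_value 0, the result
			 * is 80 - (30 - 26) - 2 - 0 + 0 = 74. */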
1724
1725 if (power_index < get_min_power_index(i, band))
1726 power_index = get_min_power_index(i, band);
1727
1728 /* adjust 5 GHz index to support negative indexes */
1729 if (!band)
1730 power_index += 9;
1731
1732 /* CCK, rate 32, reduce txpower for CCK */
1733 if (i == POWER_TABLE_CCK_ENTRY)
1734 power_index +=
1735 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
1736
1737 /* stay within the table! */
1738 if (power_index > 107) {
1739 IWL_WARNING("txpower index %d > 107\n",
1740 power_index);
1741 power_index = 107;
1742 }
1743 if (power_index < 0) {
1744 IWL_WARNING("txpower index %d < 0\n",
1745 power_index);
1746 power_index = 0;
1747 }
1748
1749 /* fill txpower command for this rate/chain */
1750 tx_power.s.radio_tx_gain[c] =
1751 gain_table[band][power_index].radio;
1752 tx_power.s.dsp_predis_atten[c] =
1753 gain_table[band][power_index].dsp;
1754
1755 IWL_DEBUG_TXPOWER("chain %d mimo %d index %d "
1756 "gain 0x%02x dsp %d\n",
1757 c, atten_value, power_index,
1758 tx_power.s.radio_tx_gain[c],
1759 tx_power.s.dsp_predis_atten[c]);
1760 }/* for each chain */
1761
1762 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1763
1764 }/* for each rate */
1765
1766 return 0;
1767}
1768
1769/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001770 * iwl4965_hw_reg_send_txpower - Configure the TXPOWER level user limit
Zhu Yib481de92007-09-25 17:54:57 -07001771 *
1772 * Uses the active RXON for channel, band, and characteristics (fat, high)
1773 * The power limit is taken from priv->user_txpower_limit.
1774 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001775int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001776{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001777 struct iwl4965_txpowertable_cmd cmd = { 0 };
Tomas Winkler857485c2008-03-21 13:53:44 -07001778 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001779 u8 band = 0;
1780 u8 is_fat = 0;
1781 u8 ctrl_chan_high = 0;
1782
1783 if (test_bit(STATUS_SCANNING, &priv->status)) {
1784 /* If this gets hit a lot, switch it to a BUG() and catch
1785 * the stack trace to find out who is calling this during
1786 * a scan. */
1787 IWL_WARNING("TX Power requested while scanning!\n");
1788 return -EAGAIN;
1789 }
1790
Johannes Berg8318d782008-01-24 19:38:38 +01001791 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07001792
1793 is_fat = is_fat_channel(priv->active_rxon.flags);
1794
1795 if (is_fat &&
1796 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1797 ctrl_chan_high = 1;
1798
1799 cmd.band = band;
1800 cmd.channel = priv->active_rxon.channel;
1801
Tomas Winkler857485c2008-03-21 13:53:44 -07001802 ret = iwl4965_fill_txpower_tbl(priv, band,
Zhu Yib481de92007-09-25 17:54:57 -07001803 le16_to_cpu(priv->active_rxon.channel),
1804 is_fat, ctrl_chan_high, &cmd.tx_power);
Tomas Winkler857485c2008-03-21 13:53:44 -07001805 if (ret)
1806 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07001807
Tomas Winkler857485c2008-03-21 13:53:44 -07001808 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1809
1810out:
1811 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001812}
1813
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001814static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
1815{
1816 int ret = 0;
1817 struct iwl4965_rxon_assoc_cmd rxon_assoc;
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08001818 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1819 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001820
1821 if ((rxon1->flags == rxon2->flags) &&
1822 (rxon1->filter_flags == rxon2->filter_flags) &&
1823 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1824 (rxon1->ofdm_ht_single_stream_basic_rates ==
1825 rxon2->ofdm_ht_single_stream_basic_rates) &&
1826 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1827 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1828 (rxon1->rx_chain == rxon2->rx_chain) &&
1829 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1830 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1831 return 0;
1832 }
1833
1834 rxon_assoc.flags = priv->staging_rxon.flags;
1835 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1836 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1837 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1838 rxon_assoc.reserved = 0;
1839 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1840 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1841 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1842 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1843 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1844
1845 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1846 sizeof(rxon_assoc), &rxon_assoc, NULL);
1847
1848	return ret;
1851}
1852
1853
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001854int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
Zhu Yib481de92007-09-25 17:54:57 -07001855{
1856 int rc;
1857 u8 band = 0;
1858 u8 is_fat = 0;
1859 u8 ctrl_chan_high = 0;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001860 struct iwl4965_channel_switch_cmd cmd = { 0 };
Assaf Kraussbf85ea42008-03-14 10:38:49 -07001861 const struct iwl_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07001862
Johannes Berg8318d782008-01-24 19:38:38 +01001863 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07001864
Assaf Krauss8622e702008-03-21 13:53:43 -07001865 ch_info = iwl_get_channel_info(priv, priv->band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07001866
1867 is_fat = is_fat_channel(priv->staging_rxon.flags);
1868
1869 if (is_fat &&
1870 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1871 ctrl_chan_high = 1;
1872
1873 cmd.band = band;
1874 cmd.expect_beacon = 0;
1875 cmd.channel = cpu_to_le16(channel);
1876 cmd.rxon_flags = priv->active_rxon.flags;
1877 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
1878 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
1879 if (ch_info)
1880 cmd.expect_beacon = is_channel_radar(ch_info);
1881 else
1882 cmd.expect_beacon = 1;
1883
1884 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
1885 ctrl_chan_high, &cmd.tx_power);
1886 if (rc) {
1887 IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc);
1888 return rc;
1889 }
1890
Tomas Winkler857485c2008-03-21 13:53:44 -07001891 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
Zhu Yib481de92007-09-25 17:54:57 -07001892 return rc;
1893}
1894
Ron Rindjunskyd67f5482008-05-05 10:22:49 +08001895static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001896{
Tomas Winkler059ff822008-04-14 21:16:14 -07001897 struct iwl4965_shared *s = priv->shared_virt;
1898 return le32_to_cpu(s->rb_closed) & 0xFFF;
Zhu Yib481de92007-09-25 17:54:57 -07001899}
1900
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001901int iwl4965_hw_get_temperature(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001902{
1903 return priv->temperature;
1904}
1905
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001906unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
Tomas Winklerfcab4232008-05-15 13:54:01 +08001907 struct iwl_frame *frame, u8 rate)
Zhu Yib481de92007-09-25 17:54:57 -07001908{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001909 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
Zhu Yib481de92007-09-25 17:54:57 -07001910 unsigned int frame_size;
1911
1912 tx_beacon_cmd = &frame->u.beacon;
1913 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
1914
Tomas Winkler5425e492008-04-15 16:01:38 -07001915 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
Zhu Yib481de92007-09-25 17:54:57 -07001916 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1917
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001918 frame_size = iwl4965_fill_beacon_frame(priv,
Zhu Yib481de92007-09-25 17:54:57 -07001919 tx_beacon_cmd->frame,
Tomas Winkler57bd1be2008-05-15 13:54:03 +08001920 iwl_bcast_addr,
Zhu Yib481de92007-09-25 17:54:57 -07001921 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
1922
1923 BUG_ON(frame_size > MAX_MPDU_SIZE);
1924 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
1925
1926 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
1927 tx_beacon_cmd->tx.rate_n_flags =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001928 iwl4965_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
Zhu Yib481de92007-09-25 17:54:57 -07001929 else
1930 tx_beacon_cmd->tx.rate_n_flags =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001931 iwl4965_hw_set_rate_n_flags(rate, 0);
Zhu Yib481de92007-09-25 17:54:57 -07001932
1933 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
1934 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
1935 return (sizeof(*tx_beacon_cmd) + frame_size);
1936}
1937
Ron Rindjunsky399f4902008-04-23 17:14:56 -07001938static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
1939{
1940 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
1941 sizeof(struct iwl4965_shared),
1942 &priv->shared_phys);
1943 if (!priv->shared_virt)
1944 return -ENOMEM;
1945
1946 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));
1947
Ron Rindjunskyd67f5482008-05-05 10:22:49 +08001948 priv->rb_closed_offset = offsetof(struct iwl4965_shared, rb_closed);
1949
Ron Rindjunsky399f4902008-04-23 17:14:56 -07001950 return 0;
1951}
1952
1953static void iwl4965_free_shared_mem(struct iwl_priv *priv)
1954{
1955 if (priv->shared_virt)
1956 pci_free_consistent(priv->pci_dev,
1957 sizeof(struct iwl4965_shared),
1958 priv->shared_virt,
1959 priv->shared_phys);
1960}
1961
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001962/**
Tomas Winklere2a722e2008-04-14 21:16:10 -07001963 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001964 */
Tomas Winklere2a722e2008-04-14 21:16:10 -07001965static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
Ron Rindjunsky16466902008-05-05 10:22:50 +08001966 struct iwl_tx_queue *txq,
Tomas Winklere2a722e2008-04-14 21:16:10 -07001967 u16 byte_cnt)
Zhu Yib481de92007-09-25 17:54:57 -07001968{
1969 int len;
1970 int txq_id = txq->q.id;
Tomas Winkler059ff822008-04-14 21:16:14 -07001971 struct iwl4965_shared *shared_data = priv->shared_virt;
Zhu Yib481de92007-09-25 17:54:57 -07001972
Zhu Yib481de92007-09-25 17:54:57 -07001973 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1974
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001975 /* Set up byte count within first 256 entries */
Zhu Yib481de92007-09-25 17:54:57 -07001976 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
Tomas Winklerfc4b6852007-10-25 17:15:24 +08001977 tfd_offset[txq->q.write_ptr], byte_cnt, len);
Zhu Yib481de92007-09-25 17:54:57 -07001978
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001979 /* If within first 64 entries, duplicate at end */
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001980 if (txq->q.write_ptr < IWL49_MAX_WIN_SIZE)
Zhu Yib481de92007-09-25 17:54:57 -07001981 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001982 tfd_offset[IWL49_QUEUE_SIZE + txq->q.write_ptr],
Zhu Yib481de92007-09-25 17:54:57 -07001983 byte_cnt, len);
Zhu Yib481de92007-09-25 17:54:57 -07001984}
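/* Example of the duplication above: for write_ptr 5 on queue 3, both
 * tfd_offset[5] and tfd_offset[IWL49_QUEUE_SIZE + 5] of that queue's
 * byte-count table receive the same length, presumably so the scheduler can
 * read a contiguous window without wrapping. */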
1985
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001986/**
Zhu Yib481de92007-09-25 17:54:57 -07001987 * sign_extend - Sign extend a value using specified bit as sign-bit
1988 *
1989 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
1990 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
1991 *
1992 * @param oper value to sign extend
1993 * @param index 0 based bit index (0<=index<32) to sign bit
1994 */
1995static s32 sign_extend(u32 oper, int index)
1996{
1997 u8 shift = 31 - index;
1998
1999 return (s32)(oper << shift) >> shift;
2000}
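/* Another worked case: sign_extend(0x00FFFFFE, 23) shifts left by 8 to
 * 0xFFFFFE00, and the arithmetic shift back yields -2; bit 23 acts as the
 * sign bit of the 24-bit field. */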
2001
2002/**
2003 * iwl4965_get_temperature - return the calibrated temperature (in Kelvin)
2004 * @statistics: Provides the temperature reading from the uCode
2005 *
2006 * A return of <0 indicates bogus data in the statistics
2007 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002008int iwl4965_get_temperature(const struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002009{
2010 s32 temperature;
2011 s32 vt;
2012 s32 R1, R2, R3;
2013 u32 R4;
2014
2015 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
2016 (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
2017 IWL_DEBUG_TEMP("Running FAT temperature calibration\n");
2018 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
2019 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
2020 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
2021 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
2022 } else {
2023 IWL_DEBUG_TEMP("Running temperature calibration\n");
2024 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
2025 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
2026 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
2027 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
2028 }
2029
2030 /*
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002031 * Temperature is only 23 bits, so sign extend out to 32.
Zhu Yib481de92007-09-25 17:54:57 -07002032 *
2033 * NOTE If we haven't received a statistics notification yet
2034 * with an updated temperature, use R4 provided to us in the
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002035 * "initialize" ALIVE response.
2036 */
Zhu Yib481de92007-09-25 17:54:57 -07002037 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
2038 vt = sign_extend(R4, 23);
2039 else
2040 vt = sign_extend(
2041 le32_to_cpu(priv->statistics.general.temperature), 23);
2042
2043 IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n",
2044 R1, R2, R3, vt);
2045
2046 if (R3 == R1) {
2047 IWL_ERROR("Calibration conflict R1 == R3\n");
2048 return -1;
2049 }
2050
2051 /* Calculate temperature in degrees Kelvin, adjust by 97%.
2052 * Add offset to center the adjustment around 0 degrees Centigrade. */
2053 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
2054 temperature /= (R3 - R1);
2055 temperature = (temperature * 97) / 100 +
2056 TEMPERATURE_CALIB_KELVIN_OFFSET;
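	/* In other words, temperature(K) ~= 0.97 * TEMPERATURE_CALIB_A_VAL *
	 * (vt - R2) / (R3 - R1) + TEMPERATURE_CALIB_KELVIN_OFFSET, where vt
	 * is the raw sign-extended reading and R1..R3 come from the
	 * "initialize" ALIVE response. */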
2057
2058 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
2059 KELVIN_TO_CELSIUS(temperature));
2060
2061 return temperature;
2062}
2063
2064/* Adjust Txpower only if temperature variance is greater than threshold. */
2065#define IWL_TEMPERATURE_THRESHOLD 3
2066
2067/**
2068 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
2069 *
2070 * If the temperature has changed sufficiently, then a recalibration
2071 * is needed.
2072 *
2073 * Assumes caller will replace priv->last_temperature once calibration
2074 * executed.
2075 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002076static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002077{
2078 int temp_diff;
2079
2080 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
2081 IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n");
2082 return 0;
2083 }
2084
2085 temp_diff = priv->temperature - priv->last_temperature;
2086
2087 /* get absolute value */
2088 if (temp_diff < 0) {
2089		IWL_DEBUG_POWER("Getting cooler, delta %d\n", temp_diff);
2090 temp_diff = -temp_diff;
2091 } else if (temp_diff == 0)
2092		IWL_DEBUG_POWER("Same temp\n");
2093 else
2094		IWL_DEBUG_POWER("Getting warmer, delta %d\n", temp_diff);
2095
2096 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
2097 IWL_DEBUG_POWER("Thermal txpower calib not needed\n");
2098 return 0;
2099 }
2100
2101 IWL_DEBUG_POWER("Thermal txpower calib needed\n");
2102
2103 return 1;
2104}
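/* For example, with a last calibration temperature of 290 K and a current
 * reading of 294 K the delta is 4, which is at or above
 * IWL_TEMPERATURE_THRESHOLD (3), so a new txpower calibration is requested;
 * a delta of 2 would be ignored.  (Values are illustrative only.) */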
2105
2106/* Calculate noise level, based on measurements during network silence just
2107 * before arriving beacon. This measurement can be done only if we know
2108 * exactly when to expect beacons, therefore only when we're associated. */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002109static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002110{
2111 struct statistics_rx_non_phy *rx_info
2112 = &(priv->statistics.rx.general);
2113 int num_active_rx = 0;
2114 int total_silence = 0;
2115 int bcn_silence_a =
2116 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
2117 int bcn_silence_b =
2118 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
2119 int bcn_silence_c =
2120 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
2121
2122 if (bcn_silence_a) {
2123 total_silence += bcn_silence_a;
2124 num_active_rx++;
2125 }
2126 if (bcn_silence_b) {
2127 total_silence += bcn_silence_b;
2128 num_active_rx++;
2129 }
2130 if (bcn_silence_c) {
2131 total_silence += bcn_silence_c;
2132 num_active_rx++;
2133 }
2134
2135 /* Average among active antennas */
2136 if (num_active_rx)
2137 priv->last_rx_noise = (total_silence / num_active_rx) - 107;
2138 else
2139 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
2140
2141 IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
2142 bcn_silence_a, bcn_silence_b, bcn_silence_c,
2143 priv->last_rx_noise);
2144}
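/* Illustrative numbers: silence readings of 15, 20 and 25 on chains A, B and
 * C average to 20, giving last_rx_noise = 20 - 107 = -87 dBm; if all three
 * readings are zero the value falls back to IWL_NOISE_MEAS_NOT_AVAILABLE. */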
2145
Tomas Winklera55360e2008-05-05 10:22:28 +08002146void iwl4965_hw_rx_statistics(struct iwl_priv *priv,
2147 struct iwl_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07002148{
Tomas Winklerdb11d632008-05-05 10:22:33 +08002149 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07002150 int change;
2151 s32 temp;
2152
2153 IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n",
2154 (int)sizeof(priv->statistics), pkt->len);
2155
2156 change = ((priv->statistics.general.temperature !=
2157 pkt->u.stats.general.temperature) ||
2158 ((priv->statistics.flag &
2159 STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
2160 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)));
2161
2162 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
2163
2164 set_bit(STATUS_STATISTICS, &priv->status);
2165
2166 /* Reschedule the statistics timer to occur in
2167 * REG_RECALIB_PERIOD seconds to ensure we get a
2168 * thermal update even if the uCode doesn't give
2169 * us one */
2170 mod_timer(&priv->statistics_periodic, jiffies +
2171 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
2172
2173 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
2174 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
2175 iwl4965_rx_calc_noise(priv);
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -07002176#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
Zhu Yib481de92007-09-25 17:54:57 -07002177 queue_work(priv->workqueue, &priv->sensitivity_work);
2178#endif
2179 }
2180
Mohamed Abbasab53d8a2008-03-25 16:33:36 -07002181 iwl_leds_background(priv);
2182
Zhu Yib481de92007-09-25 17:54:57 -07002183 /* If the hardware hasn't reported a change in
2184 * temperature then don't bother computing a
2185 * calibrated temperature value */
2186 if (!change)
2187 return;
2188
2189 temp = iwl4965_get_temperature(priv);
2190 if (temp < 0)
2191 return;
2192
2193 if (priv->temperature != temp) {
2194 if (priv->temperature)
2195 IWL_DEBUG_TEMP("Temperature changed "
2196 "from %dC to %dC\n",
2197 KELVIN_TO_CELSIUS(priv->temperature),
2198 KELVIN_TO_CELSIUS(temp));
2199 else
2200 IWL_DEBUG_TEMP("Temperature "
2201 "initialized to %dC\n",
2202 KELVIN_TO_CELSIUS(temp));
2203 }
2204
2205 priv->temperature = temp;
2206 set_bit(STATUS_TEMPERATURE, &priv->status);
2207
2208 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
2209 iwl4965_is_temp_calib_needed(priv))
2210 queue_work(priv->workqueue, &priv->txpower_work);
2211}
2212
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002213static void iwl4965_add_radiotap(struct iwl_priv *priv,
Zhu Yi12342c42007-12-20 11:27:32 +08002214 struct sk_buff *skb,
2215 struct iwl4965_rx_phy_res *rx_start,
2216 struct ieee80211_rx_status *stats,
2217 u32 ampdu_status)
2218{
Bruno Randolf566bfe52008-05-08 19:15:40 +02002219 s8 signal = stats->signal;
Zhu Yi12342c42007-12-20 11:27:32 +08002220 s8 noise = 0;
Johannes Berg8318d782008-01-24 19:38:38 +01002221 int rate = stats->rate_idx;
Zhu Yi12342c42007-12-20 11:27:32 +08002222 u64 tsf = stats->mactime;
Johannes Berga0b484f2008-04-01 17:51:47 +02002223 __le16 antenna;
Zhu Yi12342c42007-12-20 11:27:32 +08002224 __le16 phy_flags_hw = rx_start->phy_flags;
2225 struct iwl4965_rt_rx_hdr {
2226 struct ieee80211_radiotap_header rt_hdr;
2227 __le64 rt_tsf; /* TSF */
2228 u8 rt_flags; /* radiotap packet flags */
2229 u8 rt_rate; /* rate in 500kb/s */
2230 __le16 rt_channelMHz; /* channel in MHz */
2231 __le16 rt_chbitmask; /* channel bitfield */
2232 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
2233 s8 rt_dbmnoise;
2234 u8 rt_antenna; /* antenna number */
2235 } __attribute__ ((packed)) *iwl4965_rt;
2236
2237 /* TODO: We won't have enough headroom for HT frames. Fix it later. */
2238 if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
2239 if (net_ratelimit())
2240 printk(KERN_ERR "not enough headroom [%d] for "
Miguel Botón01c20982008-01-04 23:34:35 +01002241 "radiotap head [%zd]\n",
Zhu Yi12342c42007-12-20 11:27:32 +08002242 skb_headroom(skb), sizeof(*iwl4965_rt));
2243 return;
2244 }
2245
2246 /* put radiotap header in front of 802.11 header and data */
2247 iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));
2248
2249 /* initialise radiotap header */
2250 iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
2251 iwl4965_rt->rt_hdr.it_pad = 0;
2252
2253 /* total header + data */
2254 put_unaligned(cpu_to_le16(sizeof(*iwl4965_rt)),
2255 &iwl4965_rt->rt_hdr.it_len);
2256
2257 /* Indicate all the fields we add to the radiotap header */
2258 put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
2259 (1 << IEEE80211_RADIOTAP_FLAGS) |
2260 (1 << IEEE80211_RADIOTAP_RATE) |
2261 (1 << IEEE80211_RADIOTAP_CHANNEL) |
2262 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
2263 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
2264 (1 << IEEE80211_RADIOTAP_ANTENNA)),
2265 &iwl4965_rt->rt_hdr.it_present);
2266
2267 /* Zero the flags, we'll add to them as we go */
2268 iwl4965_rt->rt_flags = 0;
2269
2270 put_unaligned(cpu_to_le64(tsf), &iwl4965_rt->rt_tsf);
2271
2272 iwl4965_rt->rt_dbmsignal = signal;
2273 iwl4965_rt->rt_dbmnoise = noise;
2274
2275 /* Convert the channel frequency and set the flags */
2276 put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
2277 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
2278 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
2279 IEEE80211_CHAN_5GHZ),
2280 &iwl4965_rt->rt_chbitmask);
2281 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
2282 put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK |
2283 IEEE80211_CHAN_2GHZ),
2284 &iwl4965_rt->rt_chbitmask);
2285 else /* 802.11g */
2286 put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
2287 IEEE80211_CHAN_2GHZ),
2288 &iwl4965_rt->rt_chbitmask);
2289
Zhu Yi12342c42007-12-20 11:27:32 +08002290 if (rate == -1)
2291 iwl4965_rt->rt_rate = 0;
2292 else
Tomas Winkler1826dcc2008-05-15 13:54:02 +08002293 iwl4965_rt->rt_rate = iwl_rates[rate].ieee;
Zhu Yi12342c42007-12-20 11:27:32 +08002294
2295 /*
2296 * "antenna number"
2297 *
2298 * It seems that the antenna field in the phy flags value
2299 * is actually a bitfield. This is undefined by radiotap,
2300 * it wants an actual antenna number but I always get "7"
2301 * for most legacy frames I receive indicating that the
2302 * same frame was received on all three RX chains.
2303 *
2304 * I think this field should be removed in favour of a
2305 * new 802.11n radiotap field "RX chains" that is defined
2306 * as a bitmask.
2307 */
Johannes Berga0b484f2008-04-01 17:51:47 +02002308 antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
2309 iwl4965_rt->rt_antenna = le16_to_cpu(antenna) >> 4;
Zhu Yi12342c42007-12-20 11:27:32 +08002310
2311 /* set the preamble flag if appropriate */
2312 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
2313 iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2314
2315 stats->flag |= RX_FLAG_RADIOTAP;
2316}
2317
Tomas Winkler19758be2008-03-12 16:58:51 -07002318static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
2319{
2320 /* 0 - mgmt, 1 - cnt, 2 - data */
2321 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
2322 priv->rx_stats[idx].cnt++;
2323 priv->rx_stats[idx].bytes += len;
2324}
2325
Emmanuel Grumbach3ec47732008-04-17 16:03:36 -07002326/*
2327 * returns non-zero if packet should be dropped
2328 */
2329static int iwl4965_set_decrypted_flag(struct iwl_priv *priv,
2330 struct ieee80211_hdr *hdr,
2331 u32 decrypt_res,
2332 struct ieee80211_rx_status *stats)
2333{
2334 u16 fc = le16_to_cpu(hdr->frame_control);
2335
2336 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2337 return 0;
2338
2339 if (!(fc & IEEE80211_FCTL_PROTECTED))
2340 return 0;
2341
2342 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
2343 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2344 case RX_RES_STATUS_SEC_TYPE_TKIP:
2345 /* The uCode has got a bad phase 1 Key, pushes the packet.
2346 * Decryption will be done in SW. */
2347 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2348 RX_RES_STATUS_BAD_KEY_TTAK)
2349 break;
2350
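		/* fall through - TTAK OK, check ICV/MIC status below */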
Emmanuel Grumbachccc038a2008-05-15 13:54:09 +08002351 case RX_RES_STATUS_SEC_TYPE_WEP:
Emmanuel Grumbach3ec47732008-04-17 16:03:36 -07002352 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2353 RX_RES_STATUS_BAD_ICV_MIC) {
2354 /* bad ICV, the packet is destroyed since the
2355 * decryption is inplace, drop it */
2356 IWL_DEBUG_RX("Packet destroyed\n");
2357 return -1;
2358 }
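		/* fall through - good ICV/MIC, mark as decrypted below */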
Emmanuel Grumbach3ec47732008-04-17 16:03:36 -07002359 case RX_RES_STATUS_SEC_TYPE_CCMP:
2360 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2361 RX_RES_STATUS_DECRYPT_OK) {
2362 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
2363 stats->flag |= RX_FLAG_DECRYPTED;
2364 }
2365 break;
2366
2367 default:
2368 break;
2369 }
2370 return 0;
2371}
2372
Ester Kummerbf403db2008-05-05 10:22:40 +08002373static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
Emmanuel Grumbach17e476b2008-03-19 16:41:42 -07002374{
2375 u32 decrypt_out = 0;
2376
2377 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
2378 RX_RES_STATUS_STATION_FOUND)
2379 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
2380 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
2381
2382 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
2383
2384 /* packet was not encrypted */
2385 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
2386 RX_RES_STATUS_SEC_TYPE_NONE)
2387 return decrypt_out;
2388
2389 /* packet was encrypted with unknown alg */
2390 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
2391 RX_RES_STATUS_SEC_TYPE_ERR)
2392 return decrypt_out;
2393
2394 /* decryption was not done in HW */
2395 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
2396 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
2397 return decrypt_out;
2398
2399 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
2400
2401 case RX_RES_STATUS_SEC_TYPE_CCMP:
2402 /* alg is CCM: check MIC only */
2403 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
2404 /* Bad MIC */
2405 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
2406 else
2407 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
2408
2409 break;
2410
2411 case RX_RES_STATUS_SEC_TYPE_TKIP:
2412 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
2413 /* Bad TTAK */
2414 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
2415 break;
2416 }
2417 /* fall through if TTAK OK */
2418 default:
2419 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
2420 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
2421 else
2422 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
2423 break;
2424	}
2425
2426 IWL_DEBUG_RX("decrypt_in:0x%x decrypt_out = 0x%x\n",
2427 decrypt_in, decrypt_out);
2428
2429 return decrypt_out;
2430}
2431
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002432static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
Zhu Yib481de92007-09-25 17:54:57 -07002433 int include_phy,
Tomas Winklera55360e2008-05-05 10:22:28 +08002434 struct iwl_rx_mem_buffer *rxb,
Zhu Yib481de92007-09-25 17:54:57 -07002435 struct ieee80211_rx_status *stats)
2436{
Tomas Winklerdb11d632008-05-05 10:22:33 +08002437 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07002438 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
2439 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
2440 struct ieee80211_hdr *hdr;
2441 u16 len;
2442 __le32 *rx_end;
2443 unsigned int skblen;
2444 u32 ampdu_status;
Emmanuel Grumbach17e476b2008-03-19 16:41:42 -07002445 u32 ampdu_status_legacy;
Zhu Yib481de92007-09-25 17:54:57 -07002446
2447 if (!include_phy && priv->last_phy_res[0])
2448 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
2449
2450 if (!rx_start) {
2451		IWL_ERROR("MPDU frame without PHY data\n");
2452 return;
2453 }
2454 if (include_phy) {
2455 hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] +
2456 rx_start->cfg_phy_cnt);
2457
2458 len = le16_to_cpu(rx_start->byte_count);
2459
2460 rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] +
2461 sizeof(struct iwl4965_rx_phy_res) +
2462 rx_start->cfg_phy_cnt + len);
2463
2464 } else {
2465 struct iwl4965_rx_mpdu_res_start *amsdu =
2466 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
2467
2468 hdr = (struct ieee80211_hdr *)(pkt->u.raw +
2469 sizeof(struct iwl4965_rx_mpdu_res_start));
2470 len = le16_to_cpu(amsdu->byte_count);
2471 rx_start->byte_count = amsdu->byte_count;
2472 rx_end = (__le32 *) (((u8 *) hdr) + len);
2473 }
Abhijeet Kolekar4419e392008-05-05 10:22:47 +08002474	/* In monitor mode allow 802.11 ACK frames (10 bytes) */
2475 if (len > priv->hw_params.max_pkt_size ||
2476 len < ((priv->iw_mode == IEEE80211_IF_TYPE_MNTR) ? 10 : 16)) {
Zhu Yi12342c42007-12-20 11:27:32 +08002477 IWL_WARNING("byte count out of range [16,4K] : %d\n", len);
Zhu Yib481de92007-09-25 17:54:57 -07002478 return;
2479 }
2480
2481 ampdu_status = le32_to_cpu(*rx_end);
2482 skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32);
2483
Emmanuel Grumbach17e476b2008-03-19 16:41:42 -07002484 if (!include_phy) {
2485 /* New status scheme, need to translate */
2486 ampdu_status_legacy = ampdu_status;
Ester Kummerbf403db2008-05-05 10:22:40 +08002487 ampdu_status = iwl4965_translate_rx_status(priv, ampdu_status);
Emmanuel Grumbach17e476b2008-03-19 16:41:42 -07002488 }
2489
Zhu Yib481de92007-09-25 17:54:57 -07002490 /* start from MAC */
2491 skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
2492 skb_put(rxb->skb, len); /* end where data ends */
2493
2494 /* We only process data packets if the interface is open */
2495 if (unlikely(!priv->is_open)) {
2496 IWL_DEBUG_DROP_LIMIT
2497 ("Dropping packet while interface is not open.\n");
2498 return;
2499 }
2500
Zhu Yib481de92007-09-25 17:54:57 -07002501 stats->flag = 0;
2502 hdr = (struct ieee80211_hdr *)rxb->skb->data;
2503
Emmanuel Grumbach3ec47732008-04-17 16:03:36 -07002504 /* in case of HW accelerated crypto and bad decryption, drop */
Ron Rindjunsky099b40b2008-04-21 15:41:53 -07002505 if (!priv->hw_params.sw_crypto &&
Emmanuel Grumbach3ec47732008-04-17 16:03:36 -07002506 iwl4965_set_decrypted_flag(priv, hdr, ampdu_status, stats))
2507 return;
Zhu Yib481de92007-09-25 17:54:57 -07002508
Zhu Yi12342c42007-12-20 11:27:32 +08002509 if (priv->add_radiotap)
2510 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
2511
Tomas Winkler19758be2008-03-12 16:58:51 -07002512 iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len);
Zhu Yib481de92007-09-25 17:54:57 -07002513 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
2514 priv->alloc_rxb_skb--;
2515 rxb->skb = NULL;
Zhu Yib481de92007-09-25 17:54:57 -07002516}
2517
2518/* Calc max signal level (dBm) among 3 possible receivers */
Ester Kummerbf403db2008-05-05 10:22:40 +08002519static int iwl4965_calc_rssi(struct iwl_priv *priv,
2520 struct iwl4965_rx_phy_res *rx_resp)
Zhu Yib481de92007-09-25 17:54:57 -07002521{
2522 /* data from PHY/DSP regarding signal strength, etc.,
2523 * contents are always there, not configurable by host. */
2524 struct iwl4965_rx_non_cfg_phy *ncphy =
2525 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
2526 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
2527 >> IWL_AGC_DB_POS;
2528
2529 u32 valid_antennae =
2530 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
2531 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
2532 u8 max_rssi = 0;
2533 u32 i;
2534
2535 /* Find max rssi among 3 possible receivers.
2536 * These values are measured by the digital signal processor (DSP).
2537 * They should stay fairly constant even as the signal strength varies,
2538 * if the radio's automatic gain control (AGC) is working right.
2539 * AGC value (see below) will provide the "interesting" info. */
2540 for (i = 0; i < 3; i++)
2541 if (valid_antennae & (1 << i))
2542 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
2543
2544 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
2545 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
2546 max_rssi, agc);
2547
2548 /* dBm = max_rssi dB - agc dB - constant.
2549 * Higher AGC (higher radio gain) means lower signal. */
2550 return (max_rssi - agc - IWL_RSSI_OFFSET);
2551}
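/* Example: with a strongest DSP reading of 80 and an AGC value of 30 the
 * reported level is 80 - 30 - IWL_RSSI_OFFSET dBm; the numbers are
 * illustrative, only the formula comes from the code above. */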
2552
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002553static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
Zhu Yib481de92007-09-25 17:54:57 -07002554{
2555 unsigned long flags;
2556
2557 spin_lock_irqsave(&priv->sta_lock, flags);
2558 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
2559 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
2560 priv->stations[sta_id].sta.sta.modify_mask = 0;
2561 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
2562 spin_unlock_irqrestore(&priv->sta_lock, flags);
2563
Tomas Winkler133636d2008-05-05 10:22:34 +08002564 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -07002565}
2566
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002567static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
Zhu Yib481de92007-09-25 17:54:57 -07002568{
2569 /* FIXME: need locking over ps_status ??? */
Tomas Winkler947b13a2008-04-16 16:34:48 -07002570 u8 sta_id = iwl_find_station(priv, addr);
Zhu Yib481de92007-09-25 17:54:57 -07002571
2572 if (sta_id != IWL_INVALID_STATION) {
2573 u8 sta_awake = priv->stations[sta_id].
2574 ps_status == STA_PS_STATUS_WAKE;
2575
2576 if (sta_awake && ps_bit)
2577 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
2578 else if (!sta_awake && !ps_bit) {
2579 iwl4965_sta_modify_ps_wake(priv, sta_id);
2580 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
2581 }
2582 }
2583}
Tomas Winkler0a6857e2008-03-12 16:58:49 -07002584#ifdef CONFIG_IWLWIFI_DEBUG
Tomas Winkler17744ff2008-03-02 01:52:00 +02002585
2586/**
2587 * iwl4965_dbg_report_frame - dump frame to syslog during debug sessions
2588 *
2589 * You may hack this function to show different aspects of received frames,
2590 * including selective frame dumps.
2591 * group100 parameter selects whether to show 1 out of 100 good frames.
2592 *
2593 * TODO: This was originally written for 3945, need to audit for
2594 * proper operation with 4965.
2595 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002596static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
Tomas Winklerdb11d632008-05-05 10:22:33 +08002597 struct iwl_rx_packet *pkt,
Tomas Winkler17744ff2008-03-02 01:52:00 +02002598 struct ieee80211_hdr *header, int group100)
2599{
2600 u32 to_us;
2601 u32 print_summary = 0;
2602 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
2603 u32 hundred = 0;
2604 u32 dataframe = 0;
2605 u16 fc;
2606 u16 seq_ctl;
2607 u16 channel;
2608 u16 phy_flags;
2609 int rate_sym;
2610 u16 length;
2611 u16 status;
2612 u16 bcn_tmr;
2613 u32 tsf_low;
2614 u64 tsf;
2615 u8 rssi;
2616 u8 agc;
2617 u16 sig_avg;
2618 u16 noise_diff;
2619 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
2620 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
2621 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
2622 u8 *data = IWL_RX_DATA(pkt);
2623
Ester Kummerbf403db2008-05-05 10:22:40 +08002624 if (likely(!(priv->debug_level & IWL_DL_RX)))
Tomas Winkler17744ff2008-03-02 01:52:00 +02002625 return;
2626
2627 /* MAC header */
2628 fc = le16_to_cpu(header->frame_control);
2629 seq_ctl = le16_to_cpu(header->seq_ctrl);
2630
2631 /* metadata */
2632 channel = le16_to_cpu(rx_hdr->channel);
2633 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
2634 rate_sym = rx_hdr->rate;
2635 length = le16_to_cpu(rx_hdr->len);
2636
2637 /* end-of-frame status and timestamp */
2638 status = le32_to_cpu(rx_end->status);
2639 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
2640 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
2641 tsf = le64_to_cpu(rx_end->timestamp);
2642
2643 /* signal statistics */
2644 rssi = rx_stats->rssi;
2645 agc = rx_stats->agc;
2646 sig_avg = le16_to_cpu(rx_stats->sig_avg);
2647 noise_diff = le16_to_cpu(rx_stats->noise_diff);
2648
2649 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
2650
2651 /* if data frame is to us and all is good,
2652 * (optionally) print summary for only 1 out of every 100 */
2653 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
2654 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
2655 dataframe = 1;
2656 if (!group100)
2657 print_summary = 1; /* print each frame */
2658 else if (priv->framecnt_to_us < 100) {
2659 priv->framecnt_to_us++;
2660 print_summary = 0;
2661 } else {
2662 priv->framecnt_to_us = 0;
2663 print_summary = 1;
2664 hundred = 1;
2665 }
2666 } else {
2667 /* print summary for all other frames */
2668 print_summary = 1;
2669 }
2670
2671 if (print_summary) {
2672 char *title;
2673 int rate_idx;
2674 u32 bitrate;
2675
2676 if (hundred)
2677 title = "100Frames";
2678 else if (fc & IEEE80211_FCTL_RETRY)
2679 title = "Retry";
2680 else if (ieee80211_is_assoc_response(fc))
2681 title = "AscRsp";
2682 else if (ieee80211_is_reassoc_response(fc))
2683 title = "RasRsp";
2684 else if (ieee80211_is_probe_response(fc)) {
2685 title = "PrbRsp";
2686 print_dump = 1; /* dump frame contents */
2687 } else if (ieee80211_is_beacon(fc)) {
2688 title = "Beacon";
2689 print_dump = 1; /* dump frame contents */
2690 } else if (ieee80211_is_atim(fc))
2691 title = "ATIM";
2692 else if (ieee80211_is_auth(fc))
2693 title = "Auth";
2694 else if (ieee80211_is_deauth(fc))
2695 title = "DeAuth";
2696 else if (ieee80211_is_disassoc(fc))
2697 title = "DisAssoc";
2698 else
2699 title = "Frame";
2700
2701 rate_idx = iwl4965_hwrate_to_plcp_idx(rate_sym);
2702 if (unlikely(rate_idx == -1))
2703 bitrate = 0;
2704 else
Tomas Winkler1826dcc2008-05-15 13:54:02 +08002705 bitrate = iwl_rates[rate_idx].ieee / 2;
Tomas Winkler17744ff2008-03-02 01:52:00 +02002706
2707 /* print frame summary.
2708 * MAC addresses show just the last byte (for brevity),
2709 * but you can hack it to show more, if you'd like to. */
2710 if (dataframe)
2711 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
2712				     "len=%u, rssi=%d, chnl=%d, rate=%u\n",
2713 title, fc, header->addr1[5],
2714 length, rssi, channel, bitrate);
2715 else {
2716 /* src/dst addresses assume managed mode */
2717 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
2718 "src=0x%02x, rssi=%u, tim=%lu usec, "
2719 "phy=0x%02x, chnl=%d\n",
2720 title, fc, header->addr1[5],
2721 header->addr3[5], rssi,
2722 tsf_low - priv->scan_start_tsf,
2723 phy_flags, channel);
2724 }
2725 }
2726 if (print_dump)
Ester Kummerbf403db2008-05-05 10:22:40 +08002727 iwl_print_hex_dump(priv, IWL_DL_RX, data, length);
Tomas Winkler17744ff2008-03-02 01:52:00 +02002728}
2729#else
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002730static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv,
Tomas Winklerdb11d632008-05-05 10:22:33 +08002731 struct iwl_rx_packet *pkt,
Tomas Winkler17744ff2008-03-02 01:52:00 +02002732 struct ieee80211_hdr *header,
2733 int group100)
2734{
2735}
2736#endif
2737
Zhu Yib481de92007-09-25 17:54:57 -07002738
Mohamed Abbas7878a5a2007-11-29 11:10:13 +08002739
Tomas Winkler857485c2008-03-21 13:53:44 -07002740/* Called for REPLY_RX (legacy ABG frames), or
Zhu Yib481de92007-09-25 17:54:57 -07002741 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002742static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
Tomas Winklera55360e2008-05-05 10:22:28 +08002743 struct iwl_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07002744{
Tomas Winkler17744ff2008-03-02 01:52:00 +02002745 struct ieee80211_hdr *header;
2746 struct ieee80211_rx_status rx_status;
Tomas Winklerdb11d632008-05-05 10:22:33 +08002747 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07002748 /* Use phy data (Rx signal strength, etc.) contained within
2749 * this rx packet for legacy frames,
2750 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
Tomas Winkler857485c2008-03-21 13:53:44 -07002751 int include_phy = (pkt->hdr.cmd == REPLY_RX);
Zhu Yib481de92007-09-25 17:54:57 -07002752 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
2753 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
2754 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
2755 __le32 *rx_end;
2756 unsigned int len = 0;
Zhu Yib481de92007-09-25 17:54:57 -07002757 u16 fc;
Zhu Yib481de92007-09-25 17:54:57 -07002758 u8 network_packet;
2759
Tomas Winkler17744ff2008-03-02 01:52:00 +02002760 rx_status.mactime = le64_to_cpu(rx_start->timestamp);
Tomas Winklerdc92e492008-04-03 16:05:22 -07002761 rx_status.freq =
Emmanuel Grumbachc0186072008-05-08 11:34:05 +08002762 ieee80211_channel_to_frequency(le16_to_cpu(rx_start->channel));
Tomas Winkler17744ff2008-03-02 01:52:00 +02002763 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
2764 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
Tomas Winklerdc92e492008-04-03 16:05:22 -07002765 rx_status.rate_idx =
2766 iwl4965_hwrate_to_plcp_idx(le32_to_cpu(rx_start->rate_n_flags));
Tomas Winkler17744ff2008-03-02 01:52:00 +02002767 if (rx_status.band == IEEE80211_BAND_5GHZ)
2768 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
2769
2770 rx_status.antenna = 0;
2771 rx_status.flag = 0;
2772
Zhu Yib481de92007-09-25 17:54:57 -07002773 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
Tomas Winklerdc92e492008-04-03 16:05:22 -07002774		IWL_DEBUG_DROP("dsp size out of range [0,20]: %d\n",
2775 rx_start->cfg_phy_cnt);
Zhu Yib481de92007-09-25 17:54:57 -07002776 return;
2777 }
Tomas Winkler17744ff2008-03-02 01:52:00 +02002778
Zhu Yib481de92007-09-25 17:54:57 -07002779 if (!include_phy) {
2780 if (priv->last_phy_res[0])
2781 rx_start = (struct iwl4965_rx_phy_res *)
2782 &priv->last_phy_res[1];
2783 else
2784 rx_start = NULL;
2785 }
2786
2787 if (!rx_start) {
2788		IWL_ERROR("MPDU frame without PHY data\n");
2789 return;
2790 }
2791
2792 if (include_phy) {
2793 header = (struct ieee80211_hdr *)((u8 *) & rx_start[1]
2794 + rx_start->cfg_phy_cnt);
2795
2796 len = le16_to_cpu(rx_start->byte_count);
Tomas Winkler17744ff2008-03-02 01:52:00 +02002797 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
Zhu Yib481de92007-09-25 17:54:57 -07002798 sizeof(struct iwl4965_rx_phy_res) + len);
2799 } else {
2800 struct iwl4965_rx_mpdu_res_start *amsdu =
2801 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
2802
2803 header = (void *)(pkt->u.raw +
2804 sizeof(struct iwl4965_rx_mpdu_res_start));
2805 len = le16_to_cpu(amsdu->byte_count);
2806 rx_end = (__le32 *) (pkt->u.raw +
2807 sizeof(struct iwl4965_rx_mpdu_res_start) + len);
2808 }
2809
2810 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
2811 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
2812 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n",
2813 le32_to_cpu(*rx_end));
2814 return;
2815 }
2816
2817 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
2818
Zhu Yib481de92007-09-25 17:54:57 -07002819 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
Bruno Randolf566bfe52008-05-08 19:15:40 +02002820 rx_status.signal = iwl4965_calc_rssi(priv, rx_start);
Zhu Yib481de92007-09-25 17:54:57 -07002821
2822 /* Meaningful noise values are available only from beacon statistics,
2823 * which are gathered only when associated, and indicate noise
2824 * only for the associated network channel ...
2825 * Ignore these noise values while scanning (other channels) */
Tomas Winkler3109ece2008-03-28 16:33:35 -07002826 if (iwl_is_associated(priv) &&
Zhu Yib481de92007-09-25 17:54:57 -07002827 !test_bit(STATUS_SCANNING, &priv->status)) {
Tomas Winkler17744ff2008-03-02 01:52:00 +02002828 rx_status.noise = priv->last_rx_noise;
Bruno Randolf566bfe52008-05-08 19:15:40 +02002829 rx_status.qual = iwl4965_calc_sig_qual(rx_status.signal,
Tomas Winkler17744ff2008-03-02 01:52:00 +02002830 rx_status.noise);
Zhu Yib481de92007-09-25 17:54:57 -07002831 } else {
Tomas Winkler17744ff2008-03-02 01:52:00 +02002832 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
Bruno Randolf566bfe52008-05-08 19:15:40 +02002833 rx_status.qual = iwl4965_calc_sig_qual(rx_status.signal, 0);
Zhu Yib481de92007-09-25 17:54:57 -07002834 }
2835
2836 /* Reset beacon noise level if not associated. */
Tomas Winkler3109ece2008-03-28 16:33:35 -07002837 if (!iwl_is_associated(priv))
Zhu Yib481de92007-09-25 17:54:57 -07002838 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
2839
Tomas Winkler17744ff2008-03-02 01:52:00 +02002840 /* Set "1" to report good data frames in groups of 100 */
2841	/* FIXME: need to optimize the call: */
2842 iwl4965_dbg_report_frame(priv, pkt, header, 1);
Zhu Yib481de92007-09-25 17:54:57 -07002843
Tomas Winkler17744ff2008-03-02 01:52:00 +02002844 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
Bruno Randolf566bfe52008-05-08 19:15:40 +02002845		rx_status.signal, rx_status.noise, rx_status.qual,
John W. Linville06501d22008-04-01 17:38:47 -04002846 (unsigned long long)rx_status.mactime);
Zhu Yib481de92007-09-25 17:54:57 -07002847
Abhijeet Kolekar4419e392008-05-05 10:22:47 +08002848
2849 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
2850 iwl4965_handle_data_packet(priv, 1, include_phy,
2851 rxb, &rx_status);
2852 return;
2853 }
2854
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002855 network_packet = iwl4965_is_network_packet(priv, header);
Zhu Yib481de92007-09-25 17:54:57 -07002856 if (network_packet) {
Bruno Randolf566bfe52008-05-08 19:15:40 +02002857 priv->last_rx_rssi = rx_status.signal;
Zhu Yib481de92007-09-25 17:54:57 -07002858 priv->last_beacon_time = priv->ucode_beacon_time;
2859 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
2860 }
2861
2862 fc = le16_to_cpu(header->frame_control);
2863 switch (fc & IEEE80211_FCTL_FTYPE) {
2864 case IEEE80211_FTYPE_MGMT:
Zhu Yib481de92007-09-25 17:54:57 -07002865 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
2866 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
2867 header->addr2);
Tomas Winkler17744ff2008-03-02 01:52:00 +02002868 iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &rx_status);
Zhu Yib481de92007-09-25 17:54:57 -07002869 break;
2870
2871 case IEEE80211_FTYPE_CTL:
Ron Rindjunsky9ab46172007-12-25 17:00:38 +02002872#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07002873 switch (fc & IEEE80211_FCTL_STYPE) {
2874 case IEEE80211_STYPE_BACK_REQ:
2875 IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n");
2876 iwl4965_handle_data_packet(priv, 0, include_phy,
Tomas Winkler17744ff2008-03-02 01:52:00 +02002877 rxb, &rx_status);
Zhu Yib481de92007-09-25 17:54:57 -07002878 break;
2879 default:
2880 break;
2881 }
2882#endif
Zhu Yib481de92007-09-25 17:54:57 -07002883 break;
2884
Joe Perches0795af52007-10-03 17:59:30 -07002885 case IEEE80211_FTYPE_DATA: {
2886 DECLARE_MAC_BUF(mac1);
2887 DECLARE_MAC_BUF(mac2);
2888 DECLARE_MAC_BUF(mac3);
2889
Zhu Yib481de92007-09-25 17:54:57 -07002890 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
2891 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
2892 header->addr2);
2893
2894 if (unlikely(!network_packet))
2895 IWL_DEBUG_DROP("Dropping (non network): "
Joe Perches0795af52007-10-03 17:59:30 -07002896 "%s, %s, %s\n",
2897 print_mac(mac1, header->addr1),
2898 print_mac(mac2, header->addr2),
2899 print_mac(mac3, header->addr3));
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002900 else if (unlikely(iwl4965_is_duplicate_packet(priv, header)))
Joe Perches0795af52007-10-03 17:59:30 -07002901 IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n",
2902 print_mac(mac1, header->addr1),
2903 print_mac(mac2, header->addr2),
2904 print_mac(mac3, header->addr3));
Zhu Yib481de92007-09-25 17:54:57 -07002905 else
2906 iwl4965_handle_data_packet(priv, 1, include_phy, rxb,
Tomas Winkler17744ff2008-03-02 01:52:00 +02002907 &rx_status);
Zhu Yib481de92007-09-25 17:54:57 -07002908 break;
Joe Perches0795af52007-10-03 17:59:30 -07002909 }
Zhu Yib481de92007-09-25 17:54:57 -07002910 default:
2911 break;
2912
2913 }
2914}
2915
2916/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
2917 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002918static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
Tomas Winklera55360e2008-05-05 10:22:28 +08002919 struct iwl_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07002920{
Tomas Winklerdb11d632008-05-05 10:22:33 +08002921 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07002922 priv->last_phy_res[0] = 1;
2923 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
2924 sizeof(struct iwl4965_rx_phy_res));
2925}
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002926static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
Tomas Winklera55360e2008-05-05 10:22:28 +08002927 struct iwl_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07002928
2929{
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -07002930#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
Tomas Winklerdb11d632008-05-05 10:22:33 +08002931 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002932 struct iwl4965_missed_beacon_notif *missed_beacon;
Zhu Yib481de92007-09-25 17:54:57 -07002933
2934 missed_beacon = &pkt->u.missed_beacon;
2935 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
2936 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
2937 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
2938 le32_to_cpu(missed_beacon->total_missed_becons),
2939 le32_to_cpu(missed_beacon->num_recvd_beacons),
2940 le32_to_cpu(missed_beacon->num_expected_beacons));
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -07002941 if (!test_bit(STATUS_SCANNING, &priv->status))
2942 iwl_init_sensitivity(priv);
Zhu Yib481de92007-09-25 17:54:57 -07002943 }
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -07002944#endif /*CONFIG_IWL4965_RUN_TIME_CALIB*/
Zhu Yib481de92007-09-25 17:54:57 -07002945}
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08002946#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07002947
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002948/**
2949 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
2950 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002951static void iwl4965_sta_modify_enable_tid_tx(struct iwl_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -07002952 int sta_id, int tid)
2953{
2954 unsigned long flags;
2955
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002956 /* Remove "disable" flag, to enable Tx for this TID */
Zhu Yib481de92007-09-25 17:54:57 -07002957 spin_lock_irqsave(&priv->sta_lock, flags);
2958 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
2959 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
2960 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
2961 spin_unlock_irqrestore(&priv->sta_lock, flags);
2962
Tomas Winkler133636d2008-05-05 10:22:34 +08002963 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -07002964}
2965
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002966/**
2967 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
2968 *
2969 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
2970 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
2971 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002972static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
Tomas Winkler6def9762008-05-05 10:22:31 +08002973 struct iwl_ht_agg *agg,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002974 struct iwl4965_compressed_ba_resp*
Zhu Yib481de92007-09-25 17:54:57 -07002975 ba_resp)
2976
2977{
2978 int i, sh, ack;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02002979 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
2980 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
2981 u64 bitmap;
2982 int successes = 0;
Johannes Berge039fa42008-05-15 12:55:29 +02002983 struct ieee80211_tx_info *info;
Zhu Yib481de92007-09-25 17:54:57 -07002984
2985 if (unlikely(!agg->wait_for_ba)) {
2986 IWL_ERROR("Received BA when not expected\n");
2987 return -EINVAL;
2988 }
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002989
2990 /* Mark that the expected block-ack response arrived */
Zhu Yib481de92007-09-25 17:54:57 -07002991 agg->wait_for_ba = 0;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02002992 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002993
2994 /* Calculate shift to align block-ack bits with our Tx window bits */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02002995 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
Ian Schram01ebd062007-10-25 17:15:22 +08002996	if (sh < 0) /* index wrapped around the 256-entry window */
Zhu Yib481de92007-09-25 17:54:57 -07002997 sh += 0x100;
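	/*
	 * Illustrative example (made-up values): if agg->start_idx == 2 and
	 * the BA starting sequence maps to index 254, then sh = 2 - 254 =
	 * -252; adding 0x100 (the 256-entry window) gives sh = 4, i.e. the
	 * BA bitmap begins four frames before our Tx window and is shifted
	 * right by 4 below to line bit 0 up with start_idx.
	 */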
2998
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002999 /* don't use 64-bit values for now */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003000 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
Zhu Yib481de92007-09-25 17:54:57 -07003001
3002 if (agg->frame_count > (64 - sh)) {
 3003		IWL_DEBUG_TX_REPLY("more frames than bitmap size\n");
3004 return -1;
3005 }
3006
3007 /* check for success or failure according to the
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003008 * transmitted bitmap and block-ack bitmap */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003009 bitmap &= agg->bitmap;
Zhu Yib481de92007-09-25 17:54:57 -07003010
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003011 /* For each frame attempted in aggregation,
3012 * update driver's record of tx frame's status. */
Zhu Yib481de92007-09-25 17:54:57 -07003013 for (i = 0; i < agg->frame_count ; i++) {
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003014 ack = bitmap & (1 << i);
3015 successes += !!ack;
Zhu Yib481de92007-09-25 17:54:57 -07003016 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003017			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
3018 agg->start_idx + i);
Zhu Yib481de92007-09-25 17:54:57 -07003019 }
3020
Johannes Berge039fa42008-05-15 12:55:29 +02003021 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
3022 memset(&info->status, 0, sizeof(info->status));
3023 info->flags = IEEE80211_TX_STAT_ACK;
3024 info->flags |= IEEE80211_TX_STAT_AMPDU;
3025 info->status.ampdu_ack_map = successes;
3026 info->status.ampdu_ack_len = agg->frame_count;
3027 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
Zhu Yib481de92007-09-25 17:54:57 -07003028
John W. Linvillef868f4e2008-03-07 16:38:43 -05003029 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003030
3031 return 0;
3032}
3033
3034/**
3035 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
3036 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003037static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003038 u16 txq_id)
3039{
3040 /* Simply stop the queue, but don't change any configuration;
3041 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003042 iwl_write_prph(priv,
Tomas Winkler12a81f62008-04-03 16:05:20 -07003043 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07003044 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
3045 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003046}
3047
3048/**
 3049 * txq_id must be at least IWL_BACK_QUEUE_FIRST_ID
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003050 * priv->lock must be held by the caller
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003051 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003052static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003053 u16 ssn_idx, u8 tx_fifo)
3054{
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003055 int ret = 0;
3056
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003057 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
 3058		IWL_WARNING("queue number too small: %d, must be >= %d\n",
3059 txq_id, IWL_BACK_QUEUE_FIRST_ID);
3060 return -EINVAL;
3061 }
3062
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003063 ret = iwl_grab_nic_access(priv);
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003064 if (ret)
3065 return ret;
3066
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003067 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
3068
Tomas Winkler12a81f62008-04-03 16:05:20 -07003069 iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003070
3071 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
3072 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
3073 /* supposes that ssn_idx is valid (!= 0xFFF) */
3074 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
3075
Tomas Winkler12a81f62008-04-03 16:05:20 -07003076 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Ron Rindjunsky36470742008-05-15 13:54:10 +08003077 iwl_txq_ctx_deactivate(priv, txq_id);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003078 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
3079
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003080 iwl_release_nic_access(priv);
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003081
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003082 return 0;
3083}
3084
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003085int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003086 u8 tid, int txq_id)
3087{
Tomas Winkler443cfd42008-05-15 13:53:57 +08003088 struct iwl_queue *q = &priv->txq[txq_id].q;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003089 u8 *addr = priv->stations[sta_id].sta.sta.addr;
Tomas Winkler6def9762008-05-05 10:22:31 +08003090 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003091
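	/* ADDBA (session start) and DELBA (session stop) are completed only
	 * once the aggregation HW queue has drained; agg.state remembers
	 * which flow is pending until the last frame is reclaimed. */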
3092 switch (priv->stations[sta_id].tid[tid].agg.state) {
3093 case IWL_EMPTYING_HW_QUEUE_DELBA:
3094 /* We are reclaiming the last packet of the */
3095 /* aggregated HW queue */
3096 if (txq_id == tid_data->agg.txq_id &&
3097 q->read_ptr == q->write_ptr) {
3098 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
3099 int tx_fifo = default_tid_to_tx_fifo[tid];
3100 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
3101 iwl4965_tx_queue_agg_disable(priv, txq_id,
3102 ssn, tx_fifo);
3103 tid_data->agg.state = IWL_AGG_OFF;
3104 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
3105 }
3106 break;
3107 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3108 /* We are reclaiming the last packet of the queue */
3109 if (tid_data->tfds_in_queue == 0) {
3110 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
3111 tid_data->agg.state = IWL_AGG_ON;
3112 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
3113 }
3114 break;
3115 }
Zhu Yib481de92007-09-25 17:54:57 -07003116 return 0;
3117}
3118
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003119/**
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003120 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
3121 *
3122 * Handles block-acknowledge notification from device, which reports success
3123 * of frames sent via aggregation.
3124 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003125static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
Tomas Winklera55360e2008-05-05 10:22:28 +08003126 struct iwl_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003127{
Tomas Winklerdb11d632008-05-05 10:22:33 +08003128 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003129 struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
Zhu Yib481de92007-09-25 17:54:57 -07003130 int index;
Ron Rindjunsky16466902008-05-05 10:22:50 +08003131 struct iwl_tx_queue *txq = NULL;
Tomas Winkler6def9762008-05-05 10:22:31 +08003132 struct iwl_ht_agg *agg;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003133 DECLARE_MAC_BUF(mac);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003134
3135 /* "flow" corresponds to Tx queue */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003136 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003137
3138 /* "ssn" is start of block-ack Tx window, corresponds to index
3139 * (in Tx queue's circular buffer) of first TFD/frame in window */
Zhu Yib481de92007-09-25 17:54:57 -07003140 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
3141
Ron Rindjunskydfe7d452008-04-15 16:01:45 -07003142 if (scd_flow >= priv->hw_params.max_txq_num) {
Zhu Yib481de92007-09-25 17:54:57 -07003143		IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n");
3144 return;
3145 }
3146
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003147 txq = &priv->txq[scd_flow];
Zhu Yib481de92007-09-25 17:54:57 -07003148 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003149
3150 /* Find index just before block-ack window */
Tomas Winkler443cfd42008-05-15 13:53:57 +08003151 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
Zhu Yib481de92007-09-25 17:54:57 -07003152
Ian Schram01ebd062007-10-25 17:15:22 +08003153 /* TODO: Need to get this copy more safely - now good for debug */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003154
Joe Perches0795af52007-10-03 17:59:30 -07003155 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
3156 "sta_id = %d\n",
Zhu Yib481de92007-09-25 17:54:57 -07003157 agg->wait_for_ba,
Joe Perches0795af52007-10-03 17:59:30 -07003158 print_mac(mac, (u8*) &ba_resp->sta_addr_lo32),
Zhu Yib481de92007-09-25 17:54:57 -07003159 ba_resp->sta_id);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003160 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
Zhu Yib481de92007-09-25 17:54:57 -07003161 "%d, scd_ssn = %d\n",
3162 ba_resp->tid,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003163 ba_resp->seq_ctl,
Tomas Winkler0310ae72008-03-11 16:17:19 -07003164 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
Zhu Yib481de92007-09-25 17:54:57 -07003165 ba_resp->scd_flow,
3166 ba_resp->scd_ssn);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003167 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
Zhu Yib481de92007-09-25 17:54:57 -07003168 agg->start_idx,
John W. Linvillef868f4e2008-03-07 16:38:43 -05003169 (unsigned long long)agg->bitmap);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003170
3171 /* Update driver's record of ACK vs. not for each frame in window */
Zhu Yib481de92007-09-25 17:54:57 -07003172 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003173
3174 /* Release all TFDs before the SSN, i.e. all TFDs in front of
3175 * block-ack window (we assume that they've been successfully
3176 * transmitted ... if not, it's too late anyway). */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003177 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
Ron Rindjunsky0d0b2c12008-05-04 14:48:18 +03003178 /* calculate mac80211 ampdu sw queue to wake */
3179 int ampdu_q =
3180 scd_flow - IWL_BACK_QUEUE_FIRST_ID + priv->hw->queues;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003181 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
3182 priv->stations[ba_resp->sta_id].
3183 tid[ba_resp->tid].tfds_in_queue -= freed;
Tomas Winkler443cfd42008-05-15 13:53:57 +08003184 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003185 priv->mac80211_registered &&
3186 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
Ron Rindjunsky0d0b2c12008-05-04 14:48:18 +03003187 ieee80211_wake_queue(priv->hw, ampdu_q);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003188 iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id,
3189 ba_resp->tid, scd_flow);
3190 }
Zhu Yib481de92007-09-25 17:54:57 -07003191}
3192
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003193/**
3194 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
3195 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003196static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
Zhu Yib481de92007-09-25 17:54:57 -07003197 u16 txq_id)
3198{
3199 u32 tbl_dw_addr;
3200 u32 tbl_dw;
3201 u16 scd_q2ratid;
3202
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07003203 scd_q2ratid = ra_tid & IWL49_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
Zhu Yib481de92007-09-25 17:54:57 -07003204
3205 tbl_dw_addr = priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07003206 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
Zhu Yib481de92007-09-25 17:54:57 -07003207
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003208 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
Zhu Yib481de92007-09-25 17:54:57 -07003209
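	/* Each 32-bit word of the translation table holds two RA/TID
	 * entries: even-numbered queues use the low 16 bits, odd-numbered
	 * queues the high 16 bits, hence the read-modify-write below. */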
3210 if (txq_id & 0x1)
3211 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
3212 else
3213 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
3214
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003215 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
Zhu Yib481de92007-09-25 17:54:57 -07003216
3217 return 0;
3218}
3219
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003220
Zhu Yib481de92007-09-25 17:54:57 -07003221/**
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003222 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
3223 *
 3224 * NOTE: txq_id must be at least IWL_BACK_QUEUE_FIRST_ID,
3225 * i.e. it must be one of the higher queues used for aggregation
Zhu Yib481de92007-09-25 17:54:57 -07003226 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003227static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
Zhu Yib481de92007-09-25 17:54:57 -07003228 int tx_fifo, int sta_id, int tid,
3229 u16 ssn_idx)
3230{
3231 unsigned long flags;
3232 int rc;
3233 u16 ra_tid;
3234
3235 if (IWL_BACK_QUEUE_FIRST_ID > txq_id)
 3236		IWL_WARNING("queue number too small: %d, must be >= %d\n",
3237 txq_id, IWL_BACK_QUEUE_FIRST_ID);
3238
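	/* BUILD_RAxTID packs the station id and TID into the single RA/TID
	 * value the scheduler's translation table expects (assumption:
	 * sta_id in the upper bits, tid in the low nibble). */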
3239 ra_tid = BUILD_RAxTID(sta_id, tid);
3240
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003241 /* Modify device's station table to Tx this TID */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003242 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid);
Zhu Yib481de92007-09-25 17:54:57 -07003243
3244 spin_lock_irqsave(&priv->lock, flags);
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003245 rc = iwl_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003246 if (rc) {
3247 spin_unlock_irqrestore(&priv->lock, flags);
3248 return rc;
3249 }
3250
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003251 /* Stop this Tx queue before configuring it */
Zhu Yib481de92007-09-25 17:54:57 -07003252 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
3253
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003254 /* Map receiver-address / traffic-ID to this queue */
Zhu Yib481de92007-09-25 17:54:57 -07003255 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
3256
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003257 /* Set this queue as a chain-building queue */
Tomas Winkler12a81f62008-04-03 16:05:20 -07003258 iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07003259
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003260 /* Place first TFD at index corresponding to start sequence number.
3261 * Assumes that ssn_idx is valid (!= 0xFFF) */
Tomas Winklerfc4b6852007-10-25 17:15:24 +08003262 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
3263 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
Zhu Yib481de92007-09-25 17:54:57 -07003264 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
3265
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003266 /* Set up Tx window size and frame limit for this queue */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003267 iwl_write_targ_mem(priv,
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07003268 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
3269 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
3270 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
Zhu Yib481de92007-09-25 17:54:57 -07003271
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003272 iwl_write_targ_mem(priv, priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07003273 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
3274 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
3275 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
Zhu Yib481de92007-09-25 17:54:57 -07003276
Tomas Winkler12a81f62008-04-03 16:05:20 -07003277 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07003278
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003279 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
Zhu Yib481de92007-09-25 17:54:57 -07003280 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
3281
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003282 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003283 spin_unlock_irqrestore(&priv->lock, flags);
3284
3285 return 0;
3286}
3287
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003288#endif /* CONFIG_IWL4965_HT */
Zhu Yib481de92007-09-25 17:54:57 -07003289
Zhu Yib481de92007-09-25 17:54:57 -07003290
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003291#ifdef CONFIG_IWL4965_HT
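/**
 * iwl4965_rx_agg_start - notify uCode, via ADD_STA, to start an Rx
 * block-ack session for the given station, TID and starting sequence number
 */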
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003292static int iwl4965_rx_agg_start(struct iwl_priv *priv,
3293 const u8 *addr, int tid, u16 ssn)
Zhu Yib481de92007-09-25 17:54:57 -07003294{
3295 unsigned long flags;
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003296 int sta_id;
3297
3298 sta_id = iwl_find_station(priv, addr);
3299 if (sta_id == IWL_INVALID_STATION)
3300 return -ENXIO;
Zhu Yib481de92007-09-25 17:54:57 -07003301
3302 spin_lock_irqsave(&priv->sta_lock, flags);
3303 priv->stations[sta_id].sta.station_flags_msk = 0;
3304 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
3305 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
3306 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
3307 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3308 spin_unlock_irqrestore(&priv->sta_lock, flags);
3309
Tomas Winkler133636d2008-05-05 10:22:34 +08003310 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003311 CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -07003312}
3313
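/**
 * iwl4965_rx_agg_stop - notify uCode, via ADD_STA, to tear down the Rx
 * block-ack session for the given station and TID
 */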
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003314static int iwl4965_rx_agg_stop(struct iwl_priv *priv,
3315 const u8 *addr, int tid)
Zhu Yib481de92007-09-25 17:54:57 -07003316{
3317 unsigned long flags;
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003318 int sta_id;
3319
3320 sta_id = iwl_find_station(priv, addr);
3321 if (sta_id == IWL_INVALID_STATION)
3322 return -ENXIO;
Zhu Yib481de92007-09-25 17:54:57 -07003323
3324 spin_lock_irqsave(&priv->sta_lock, flags);
3325 priv->stations[sta_id].sta.station_flags_msk = 0;
3326 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
3327 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
3328 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3329 spin_unlock_irqrestore(&priv->sta_lock, flags);
3330
Tomas Winkler133636d2008-05-05 10:22:34 +08003331 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003332 CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -07003333}
3334
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003335/*
3336 * Find first available (lowest unused) Tx Queue, mark it "active".
3337 * Called only when finding queue for aggregation.
3338 * Should never return anything < 7, because they should already
3339 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
3340 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003341static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003342{
3343 int txq_id;
3344
Tomas Winkler5425e492008-04-15 16:01:38 -07003345 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
Zhu Yib481de92007-09-25 17:54:57 -07003346 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
3347 return txq_id;
3348 return -1;
3349}
3350
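/**
 * iwl4965_tx_agg_start - start a Tx block-ack session
 *
 * Reserves a free aggregation queue, records it in the station's TID data
 * and configures the scheduler; the ADDBA callback is issued immediately
 * if the HW queue is already empty, otherwise it is deferred until the
 * pending frames have been reclaimed.
 */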
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003351static int iwl4965_tx_agg_start(struct ieee80211_hw *hw, const u8 *ra,
3352 u16 tid, u16 *start_seq_num)
Zhu Yib481de92007-09-25 17:54:57 -07003353{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003354 struct iwl_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07003355 int sta_id;
3356 int tx_fifo;
3357 int txq_id;
3358 int ssn = -1;
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003359 int ret = 0;
Zhu Yib481de92007-09-25 17:54:57 -07003360 unsigned long flags;
Tomas Winkler6def9762008-05-05 10:22:31 +08003361 struct iwl_tid_data *tid_data;
Joe Perches0795af52007-10-03 17:59:30 -07003362 DECLARE_MAC_BUF(mac);
Zhu Yib481de92007-09-25 17:54:57 -07003363
3364 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
3365 tx_fifo = default_tid_to_tx_fifo[tid];
3366 else
3367 return -EINVAL;
3368
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003369 IWL_WARNING("%s on ra = %s tid = %d\n",
3370 __func__, print_mac(mac, ra), tid);
Zhu Yib481de92007-09-25 17:54:57 -07003371
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003372 sta_id = iwl_find_station(priv, ra);
Zhu Yib481de92007-09-25 17:54:57 -07003373 if (sta_id == IWL_INVALID_STATION)
3374 return -ENXIO;
3375
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003376 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
3377 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
3378 return -ENXIO;
3379 }
3380
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003381 txq_id = iwl4965_txq_ctx_activate_free(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003382 if (txq_id == -1)
3383 return -ENXIO;
3384
3385 spin_lock_irqsave(&priv->sta_lock, flags);
3386 tid_data = &priv->stations[sta_id].tid[tid];
3387 ssn = SEQ_TO_SN(tid_data->seq_number);
3388 tid_data->agg.txq_id = txq_id;
3389 spin_unlock_irqrestore(&priv->sta_lock, flags);
3390
3391 *start_seq_num = ssn;
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003392 ret = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
3393 sta_id, tid, ssn);
3394 if (ret)
3395 return ret;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003396
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003397 ret = 0;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003398 if (tid_data->tfds_in_queue == 0) {
 3399		IWL_DEBUG_HT("HW queue is empty\n");
3400 tid_data->agg.state = IWL_AGG_ON;
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003401 ieee80211_start_tx_ba_cb_irqsafe(hw, ra, tid);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003402 } else {
3403 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
3404 tid_data->tfds_in_queue);
3405 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
3406 }
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003407 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07003408}
3409
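/**
 * iwl4965_tx_agg_stop - tear down a Tx block-ack session
 *
 * If the aggregation queue still holds frames, only mark the session as
 * emptying; iwl4965_check_empty_hw_queue() finishes the DELBA flow once
 * the last frame has been reclaimed.
 */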
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003410static int iwl4965_tx_agg_stop(struct ieee80211_hw *hw, const u8 *ra, u16 tid)
Zhu Yib481de92007-09-25 17:54:57 -07003411{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003412 struct iwl_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07003413 int tx_fifo_id, txq_id, sta_id, ssn = -1;
Tomas Winkler6def9762008-05-05 10:22:31 +08003414 struct iwl_tid_data *tid_data;
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003415 int ret, write_ptr, read_ptr;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003416 unsigned long flags;
Joe Perches0795af52007-10-03 17:59:30 -07003417 DECLARE_MAC_BUF(mac);
3418
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003419 if (!ra) {
3420 IWL_ERROR("ra = NULL\n");
Zhu Yib481de92007-09-25 17:54:57 -07003421 return -EINVAL;
3422 }
3423
3424 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
3425 tx_fifo_id = default_tid_to_tx_fifo[tid];
3426 else
3427 return -EINVAL;
3428
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003429 sta_id = iwl_find_station(priv, ra);
Zhu Yib481de92007-09-25 17:54:57 -07003430
3431 if (sta_id == IWL_INVALID_STATION)
3432 return -ENXIO;
3433
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003434 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
3435 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
3436
Zhu Yib481de92007-09-25 17:54:57 -07003437 tid_data = &priv->stations[sta_id].tid[tid];
3438 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
3439 txq_id = tid_data->agg.txq_id;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003440 write_ptr = priv->txq[txq_id].q.write_ptr;
3441 read_ptr = priv->txq[txq_id].q.read_ptr;
Zhu Yib481de92007-09-25 17:54:57 -07003442
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003443 /* The queue is not empty */
3444 if (write_ptr != read_ptr) {
 3445		IWL_DEBUG_HT("Stopping a non-empty AGG HW QUEUE\n");
3446 priv->stations[sta_id].tid[tid].agg.state =
3447 IWL_EMPTYING_HW_QUEUE_DELBA;
3448 return 0;
3449 }
3450
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003451 IWL_DEBUG_HT("HW queue is empty\n");
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003452 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
3453
3454 spin_lock_irqsave(&priv->lock, flags);
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003455 ret = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003456 spin_unlock_irqrestore(&priv->lock, flags);
3457
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003458 if (ret)
3459 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07003460
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003461 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
Zhu Yib481de92007-09-25 17:54:57 -07003462
3463 return 0;
3464}
3465
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003466int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
3467 enum ieee80211_ampdu_mlme_action action,
3468 const u8 *addr, u16 tid, u16 *ssn)
3469{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003470 struct iwl_priv *priv = hw->priv;
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003471 DECLARE_MAC_BUF(mac);
3472
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003473 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
3474 print_mac(mac, addr), tid);
3475
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003476 switch (action) {
3477 case IEEE80211_AMPDU_RX_START:
3478 IWL_DEBUG_HT("start Rx\n");
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003479 return iwl4965_rx_agg_start(priv, addr, tid, *ssn);
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003480 case IEEE80211_AMPDU_RX_STOP:
3481 IWL_DEBUG_HT("stop Rx\n");
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003482 return iwl4965_rx_agg_stop(priv, addr, tid);
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003483 case IEEE80211_AMPDU_TX_START:
3484 IWL_DEBUG_HT("start Tx\n");
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003485 return iwl4965_tx_agg_start(hw, addr, tid, ssn);
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003486 case IEEE80211_AMPDU_TX_STOP:
3487 IWL_DEBUG_HT("stop Tx\n");
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003488 return iwl4965_tx_agg_stop(hw, addr, tid);
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003489 default:
3490 IWL_DEBUG_HT("unknown\n");
3491 return -EINVAL;
3493 }
3494 return 0;
3495}
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003496#endif /* CONFIG_IWL4965_HT */
Zhu Yib481de92007-09-25 17:54:57 -07003497
Tomas Winkler133636d2008-05-05 10:22:34 +08003498
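/* Per-device host-command sizing: 4965 uses its own, shorter RXON layout,
 * so REPLY_RXON is trimmed to sizeof(struct iwl4965_rxon_cmd); every other
 * command keeps the length requested by the core. */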
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08003499static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
3500{
3501 switch (cmd_id) {
3502 case REPLY_RXON:
3503 return (u16) sizeof(struct iwl4965_rxon_cmd);
3504 default:
3505 return len;
3506 }
3507}
3508
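/* Translate the driver-generic iwl_addsta_cmd into the 4965 wire format,
 * copying each field explicitly, zeroing the reserved words, and returning
 * the exact number of bytes the uCode expects for this command. */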
Tomas Winkler133636d2008-05-05 10:22:34 +08003509static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
3510{
3511 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
3512 addsta->mode = cmd->mode;
3513 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
3514 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
3515 addsta->station_flags = cmd->station_flags;
3516 addsta->station_flags_msk = cmd->station_flags_msk;
3517 addsta->tid_disable_tx = cmd->tid_disable_tx;
3518 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
3519 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
3520 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
3521 addsta->reserved1 = __constant_cpu_to_le16(0);
3522 addsta->reserved2 = __constant_cpu_to_le32(0);
3523
3524 return (u16)sizeof(struct iwl4965_addsta_cmd);
3525}
Zhu Yib481de92007-09-25 17:54:57 -07003526/* Set up 4965-specific Rx frame reply handlers */
Emmanuel Grumbachd4789ef2008-04-24 11:55:20 -07003527static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003528{
3529 /* Legacy Rx frames */
Tomas Winkler857485c2008-03-21 13:53:44 -07003530 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
Zhu Yib481de92007-09-25 17:54:57 -07003531
3532 /* High-throughput (HT) Rx frames */
3533 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
3534 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
3535
3536 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
3537 iwl4965_rx_missed_beacon_notif;
3538
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003539#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07003540 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003541#endif /* CONFIG_IWL4965_HT */
Zhu Yib481de92007-09-25 17:54:57 -07003542}
3543
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003544void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003545{
3546 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -07003547#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
Zhu Yib481de92007-09-25 17:54:57 -07003548 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
3549#endif
Zhu Yib481de92007-09-25 17:54:57 -07003550 init_timer(&priv->statistics_periodic);
3551 priv->statistics_periodic.data = (unsigned long)priv;
3552 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
3553}
3554
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003555void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003556{
3557 del_timer_sync(&priv->statistics_periodic);
3558
3559 cancel_delayed_work(&priv->init_alive_start);
3560}
3561
Tomas Winkler3c424c22008-04-15 16:01:42 -07003562
3563static struct iwl_hcmd_ops iwl4965_hcmd = {
Tomas Winkler7e8c5192008-04-15 16:01:43 -07003564 .rxon_assoc = iwl4965_send_rxon_assoc,
Tomas Winkler3c424c22008-04-15 16:01:42 -07003565};
3566
Tomas Winkler857485c2008-03-21 13:53:44 -07003567static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08003568 .get_hcmd_size = iwl4965_get_hcmd_size,
Tomas Winkler133636d2008-05-05 10:22:34 +08003569 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -07003570#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
3571 .chain_noise_reset = iwl4965_chain_noise_reset,
3572 .gain_computation = iwl4965_gain_computation,
3573#endif
Tomas Winkler857485c2008-03-21 13:53:44 -07003574};
3575
Assaf Krauss6bc913b2008-03-11 16:17:18 -07003576static struct iwl_lib_ops iwl4965_lib = {
Tomas Winkler5425e492008-04-15 16:01:38 -07003577 .set_hw_params = iwl4965_hw_set_hw_params,
Ron Rindjunsky399f4902008-04-23 17:14:56 -07003578 .alloc_shared_mem = iwl4965_alloc_shared_mem,
3579 .free_shared_mem = iwl4965_free_shared_mem,
Ron Rindjunskyd67f5482008-05-05 10:22:49 +08003580 .shared_mem_rx_idx = iwl4965_shared_mem_rx_idx,
Tomas Winklere2a722e2008-04-14 21:16:10 -07003581 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
Tomas Winklerda1bc452008-05-29 16:35:00 +08003582 .txq_set_sched = iwl4965_txq_set_sched,
Emmanuel Grumbachd4789ef2008-04-24 11:55:20 -07003583 .rx_handler_setup = iwl4965_rx_handler_setup,
Tomas Winkler57aab752008-04-14 21:16:03 -07003584 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
3585 .alive_notify = iwl4965_alive_notify,
Emmanuel Grumbachf3ccc082008-05-05 10:22:45 +08003586 .init_alive_start = iwl4965_init_alive_start,
Tomas Winkler57aab752008-04-14 21:16:03 -07003587 .load_ucode = iwl4965_load_bsm,
Tomas Winkler6f4083a2008-04-16 16:34:49 -07003588 .apm_ops = {
Tomas Winkler91238712008-04-23 17:14:53 -07003589 .init = iwl4965_apm_init,
Tomas Winkler7f066102008-05-29 16:34:57 +08003590 .reset = iwl4965_apm_reset,
Tomas Winklerf118a912008-05-29 16:34:58 +08003591 .stop = iwl4965_apm_stop,
Tomas Winkler694cc562008-04-24 11:55:22 -07003592 .config = iwl4965_nic_config,
Tomas Winkler6f4083a2008-04-16 16:34:49 -07003593 .set_pwr_src = iwl4965_set_pwr_src,
3594 },
Assaf Krauss6bc913b2008-03-11 16:17:18 -07003595 .eeprom_ops = {
Tomas Winkler073d3f52008-04-21 15:41:52 -07003596 .regulatory_bands = {
3597 EEPROM_REGULATORY_BAND_1_CHANNELS,
3598 EEPROM_REGULATORY_BAND_2_CHANNELS,
3599 EEPROM_REGULATORY_BAND_3_CHANNELS,
3600 EEPROM_REGULATORY_BAND_4_CHANNELS,
3601 EEPROM_REGULATORY_BAND_5_CHANNELS,
3602 EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS,
3603 EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS
3604 },
Assaf Krauss6bc913b2008-03-11 16:17:18 -07003605 .verify_signature = iwlcore_eeprom_verify_signature,
3606 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
3607 .release_semaphore = iwlcore_eeprom_release_semaphore,
Tomas Winkler8614f362008-04-23 17:14:55 -07003608 .check_version = iwl4965_eeprom_check_version,
Tomas Winkler073d3f52008-04-21 15:41:52 -07003609 .query_addr = iwlcore_eeprom_query_addr,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07003610 },
Mohamed Abbasad97edd2008-03-28 16:21:06 -07003611 .radio_kill_sw = iwl4965_radio_kill_sw,
Mohamed Abbas5da4b552008-04-21 15:41:51 -07003612 .set_power = iwl4965_set_power,
3613 .update_chain_flags = iwl4965_update_chain_flags,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07003614};
3615
3616static struct iwl_ops iwl4965_ops = {
3617 .lib = &iwl4965_lib,
Tomas Winkler3c424c22008-04-15 16:01:42 -07003618 .hcmd = &iwl4965_hcmd,
Tomas Winkler857485c2008-03-21 13:53:44 -07003619 .utils = &iwl4965_hcmd_utils,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07003620};
3621
Ron Rindjunskyfed90172008-04-15 16:01:41 -07003622struct iwl_cfg iwl4965_agn_cfg = {
Tomas Winkler82b9a122008-03-04 18:09:30 -08003623 .name = "4965AGN",
Tomas Winkler4bf775c2008-03-04 18:09:31 -08003624 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode",
Tomas Winkler82b9a122008-03-04 18:09:30 -08003625 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
Tomas Winkler073d3f52008-04-21 15:41:52 -07003626 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07003627 .ops = &iwl4965_ops,
Assaf Krauss1ea87392008-03-18 14:57:50 -07003628 .mod_params = &iwl4965_mod_params,
Tomas Winkler82b9a122008-03-04 18:09:30 -08003629};
3630
Assaf Krauss1ea87392008-03-18 14:57:50 -07003631module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
3632MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
3633module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
3634MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
Emmanuel Grumbachfcc76c62008-04-15 16:01:47 -07003635module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444);
 3636MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
Assaf Krauss1ea87392008-03-18 14:57:50 -07003637module_param_named(debug, iwl4965_mod_params.debug, int, 0444);
3638MODULE_PARM_DESC(debug, "debug output mask");
3639module_param_named(
3640 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444);
3641MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
3642
3643module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444);
3644MODULE_PARM_DESC(queues_num, "number of hw queues.");
3645
3646/* QoS */
3647module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444);
3648MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
3649module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
3650MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
Ester Kummer3a1081e2008-05-06 11:05:14 +08003651module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, 0444);
3652MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");
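/*
 * Example module load (illustrative sketch; assumes the driver is built as
 * iwl4965.ko and that debugging support is compiled in -- the mask and
 * queue count below are only example values):
 *
 *   modprobe iwl4965 debug=0x43fff queues_num=16 amsdu_size_8K=0 fw_restart4965=1
 */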