blob: a20adab6163a678d78ffceb519076f7c1b3b7799 [file] [log] [blame]
Zhu Yib481de92007-09-25 17:54:57 -07001/******************************************************************************
2 *
Reinette Chatreeb7ae892008-03-11 16:17:17 -07003 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
Zhu Yib481de92007-09-25 17:54:57 -07004 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/version.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/wireless.h>
37#include <net/mac80211.h>
Zhu Yib481de92007-09-25 17:54:57 -070038#include <linux/etherdevice.h>
Zhu Yi12342c42007-12-20 11:27:32 +080039#include <asm/unaligned.h>
Zhu Yib481de92007-09-25 17:54:57 -070040
Assaf Krauss6bc913b2008-03-11 16:17:18 -070041#include "iwl-eeprom.h"
Tomas Winkler3e0d4cb2008-04-24 11:55:38 -070042#include "iwl-dev.h"
Tomas Winklerfee12472008-04-03 16:05:21 -070043#include "iwl-core.h"
Tomas Winkler3395f6e2008-03-25 16:33:37 -070044#include "iwl-io.h"
Zhu Yib481de92007-09-25 17:54:57 -070045#include "iwl-helpers.h"
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -070046#include "iwl-calib.h"
Tomas Winkler5083e562008-05-29 16:35:15 +080047#include "iwl-sta.h"
Zhu Yib481de92007-09-25 17:54:57 -070048
Tomas Winkler630fe9b2008-06-12 09:47:08 +080049static int iwl4965_send_tx_power(struct iwl_priv *priv);
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +080050static int iwl4965_hw_get_temperature(const struct iwl_priv *priv);
Tomas Winkler630fe9b2008-06-12 09:47:08 +080051
/* module parameters */
static struct iwl_mod_params iwl4965_mod_params = {
	.num_of_queues = IWL49_NUM_QUEUES,
	.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
	.enable_qos = 1,	/* QoS enabled by default */
	.amsdu_size_8K = 1,	/* accept 8K A-MSDU frames by default */
	.restart_fw = 1,	/* auto-restart firmware after errors */
	/* the rest are 0 by default */
};
61
/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): the result of this read is overwritten inside the
	 * loop below and never used -- looks like a leftover; confirm. */
	val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		/* compare each BSM SRAM word against the host-side image */
		val = iwl_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERROR("BSM uCode verification failed at "
				  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				  BSM_SRAM_LOWER_BOUND,
				  reg - BSM_SRAM_LOWER_BOUND, len,
				  val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}
92
93/**
94 * iwl4965_load_bsm - Load bootstrap instructions
95 *
96 * BSM operation:
97 *
98 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
99 * in special SRAM that does not power down during RFKILL. When powering back
100 * up after power-saving sleeps (or during initial uCode load), the BSM loads
101 * the bootstrap program into the on-board processor, and starts it.
102 *
103 * The bootstrap program loads (via DMA) instructions and data for a new
104 * program from host DRAM locations indicated by the host driver in the
105 * BSM_DRAM_* registers. Once the new program is loaded, it starts
106 * automatically.
107 *
108 * When initializing the NIC, the host driver points the BSM to the
109 * "initialize" uCode image. This uCode sets up some internal data, then
110 * notifies host via "initialize alive" that it is complete.
111 *
112 * The host then replaces the BSM_DRAM_* pointer values to point to the
113 * normal runtime uCode instructions and a backup uCode data cache buffer
114 * (filled initially with starting data values for the on-board processor),
115 * then triggers the "initialize" uCode to load and launch the runtime uCode,
116 * which begins normal operation.
117 *
118 * When doing a power-save shutdown, runtime uCode saves data SRAM into
119 * the backup data cache in DRAM before SRAM is powered down.
120 *
121 * When powering back up, the BSM loads the bootstrap program. This reloads
122 * the runtime uCode instructions and the backup data cache into SRAM,
123 * and re-launches the runtime uCode from where it left off.
124 */
125static int iwl4965_load_bsm(struct iwl_priv *priv)
126{
127 __le32 *image = priv->ucode_boot.v_addr;
128 u32 len = priv->ucode_boot.len;
129 dma_addr_t pinst;
130 dma_addr_t pdata;
131 u32 inst_len;
132 u32 data_len;
133 int i;
134 u32 done;
135 u32 reg_offset;
136 int ret;
137
138 IWL_DEBUG_INFO("Begin load bsm\n");
139
Ron Rindjunskyfe9b6b72008-05-29 16:35:06 +0800140 priv->ucode_type = UCODE_RT;
141
Tomas Winkler57aab752008-04-14 21:16:03 -0700142 /* make sure bootstrap program is no larger than BSM's SRAM size */
143 if (len > IWL_MAX_BSM_SIZE)
144 return -EINVAL;
145
146 /* Tell bootstrap uCode where to find the "Initialize" uCode
147 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
Tomas Winkler2d878892008-05-29 16:34:51 +0800148 * NOTE: iwl_init_alive_start() will replace these values,
Tomas Winkler57aab752008-04-14 21:16:03 -0700149 * after the "initialize" uCode has run, to point to
Tomas Winkler2d878892008-05-29 16:34:51 +0800150 * runtime/protocol instructions and backup data cache.
151 */
Tomas Winkler57aab752008-04-14 21:16:03 -0700152 pinst = priv->ucode_init.p_addr >> 4;
153 pdata = priv->ucode_init_data.p_addr >> 4;
154 inst_len = priv->ucode_init.len;
155 data_len = priv->ucode_init_data.len;
156
157 ret = iwl_grab_nic_access(priv);
158 if (ret)
159 return ret;
160
161 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
162 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
163 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
164 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
165
166 /* Fill BSM memory with bootstrap instructions */
167 for (reg_offset = BSM_SRAM_LOWER_BOUND;
168 reg_offset < BSM_SRAM_LOWER_BOUND + len;
169 reg_offset += sizeof(u32), image++)
170 _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));
171
172 ret = iwl4965_verify_bsm(priv);
173 if (ret) {
174 iwl_release_nic_access(priv);
175 return ret;
176 }
177
178 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
179 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
180 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
181 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
182
183 /* Load bootstrap code into instruction SRAM now,
184 * to prepare to load "initialize" uCode */
185 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
186
187 /* Wait for load of bootstrap uCode to finish */
188 for (i = 0; i < 100; i++) {
189 done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
190 if (!(done & BSM_WR_CTRL_REG_BIT_START))
191 break;
192 udelay(10);
193 }
194 if (i < 100)
195 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
196 else {
197 IWL_ERROR("BSM write did not complete!\n");
198 return -EIO;
199 }
200
201 /* Enable future boot loads whenever power management unit triggers it
202 * (e.g. when powering back up after power-save shutdown) */
203 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
204
205 iwl_release_nic_access(priv);
206
207 return 0;
208}
209
/**
 * iwl4965_set_ucode_ptrs - Set uCode address location
 *
 * Tell initialization uCode where to find runtime uCode.
 *
 * BSM registers initially contain pointers to initialization uCode.
 * We need to replace them to load runtime uCode inst and data,
 * and to save runtime data when powering down.
 */
static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
{
	dma_addr_t pinst;
	dma_addr_t pdata;
	unsigned long flags;
	int ret = 0;

	/* bits 35:4 for 4965 */
	pinst = priv->ucode_code.p_addr >> 4;
	pdata = priv->ucode_data_backup.p_addr >> 4;

	/* NIC access must be grabbed/released with priv->lock held */
	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* Tell bootstrap uCode where to find image to load */
	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	/* NOTE(review): data pointer is the backup cache, but the byte count
	 * written is ucode_data.len -- assumes both images have the same
	 * length; confirm. */
	iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
		       priv->ucode_data.len);

	/* Inst bytecount must be last to set up, bit 31 signals uCode
	 * that all new ptr/size info is in place */
	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
		       priv->ucode_code.len | BSM_DRAM_INST_LOAD);
	iwl_release_nic_access(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");

	return ret;
}
255
/**
 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
 *
 * Called after REPLY_ALIVE notification received from "initialize" uCode.
 *
 * The 4965 "initialize" ALIVE reply contains calibration data for:
 *   Voltage, temperature, and MIMO tx gain correction, now stored in priv
 *   (3945 does not contain this data).
 *
 * Tell "initialize" uCode to go ahead and load the runtime uCode.
 */
static void iwl4965_init_alive_start(struct iwl_priv *priv)
{
	/* Check alive response for "valid" sign from uCode */
	if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO("Initialize Alive failed.\n");
		goto restart;
	}

	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded.  */
	if (iwl_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	/* Calculate temperature */
	priv->temperature = iwl4965_hw_get_temperature(priv);

	/* Send pointers to protocol/runtime uCode image ... init code will
	 * load and launch runtime uCode, which will send us another "Alive"
	 * notification. */
	IWL_DEBUG_INFO("Initialization Alive received.\n");
	if (iwl4965_set_ucode_ptrs(priv)) {
		/* Runtime instruction load won't happen;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n");
		goto restart;
	}
	return;

restart:
	/* any failure above: schedule a full driver restart */
	queue_work(priv->workqueue, &priv->restart);
}
305
Zhu Yib481de92007-09-25 17:54:57 -0700306static int is_fat_channel(__le32 rxon_flags)
307{
308 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
309 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
310}
311
Tomas Winkler8614f362008-04-23 17:14:55 -0700312/*
313 * EEPROM handlers
314 */
315
316static int iwl4965_eeprom_check_version(struct iwl_priv *priv)
317{
318 u16 eeprom_ver;
319 u16 calib_ver;
320
321 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
322
323 calib_ver = iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
324
325 if (eeprom_ver < EEPROM_4965_EEPROM_VERSION ||
326 calib_ver < EEPROM_4965_TX_POWER_VERSION)
327 goto err;
328
329 return 0;
330err:
331 IWL_ERROR("Unsuported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
332 eeprom_ver, EEPROM_4965_EEPROM_VERSION,
333 calib_ver, EEPROM_4965_TX_POWER_VERSION);
334 return -EINVAL;
335
336}
Tomas Winkler079a2532008-04-17 16:03:39 -0700337int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
Zhu Yib481de92007-09-25 17:54:57 -0700338{
Tomas Winklerd8609652007-10-25 17:15:35 +0800339 int ret;
Zhu Yib481de92007-09-25 17:54:57 -0700340 unsigned long flags;
341
342 spin_lock_irqsave(&priv->lock, flags);
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700343 ret = iwl_grab_nic_access(priv);
Tomas Winklerd8609652007-10-25 17:15:35 +0800344 if (ret) {
Zhu Yib481de92007-09-25 17:54:57 -0700345 spin_unlock_irqrestore(&priv->lock, flags);
Tomas Winklerd8609652007-10-25 17:15:35 +0800346 return ret;
Zhu Yib481de92007-09-25 17:54:57 -0700347 }
348
Tomas Winkler6f4083a2008-04-16 16:34:49 -0700349 if (src == IWL_PWR_SRC_VAUX) {
Zhu Yib481de92007-09-25 17:54:57 -0700350 u32 val;
Tomas Winklerd8609652007-10-25 17:15:35 +0800351 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
Tomas Winkler6f4083a2008-04-16 16:34:49 -0700352 &val);
Zhu Yib481de92007-09-25 17:54:57 -0700353
Tomas Winkler6f4083a2008-04-16 16:34:49 -0700354 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700355 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
Tomas Winkler6f4083a2008-04-16 16:34:49 -0700356 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
357 ~APMG_PS_CTRL_MSK_PWR_SRC);
358 }
359 } else {
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700360 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
Tomas Winkler6f4083a2008-04-16 16:34:49 -0700361 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
362 ~APMG_PS_CTRL_MSK_PWR_SRC);
363 }
Zhu Yib481de92007-09-25 17:54:57 -0700364
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700365 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700366 spin_unlock_irqrestore(&priv->lock, flags);
367
Tomas Winklerd8609652007-10-25 17:15:35 +0800368 return ret;
Zhu Yib481de92007-09-25 17:54:57 -0700369}
370
Tomas Winklerda1bc452008-05-29 16:35:00 +0800371/*
372 * Activate/Deactivat Tx DMA/FIFO channels according tx fifos mask
373 * must be called under priv->lock and mac access
374 */
375static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
Zhu Yib481de92007-09-25 17:54:57 -0700376{
Tomas Winklerda1bc452008-05-29 16:35:00 +0800377 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
Zhu Yib481de92007-09-25 17:54:57 -0700378}
Ron Rindjunsky5a676bb2008-05-05 10:22:42 +0800379
/*
 * Bring up the device's APM (power management) unit: PCIe link tuning,
 * move the adapter to the active power state, wait for clocks, then
 * enable DMA/BSM clocks and disable L1-Active.
 * Returns 0 on success, negative on clock-stabilization or NIC-access
 * failure.
 */
static int iwl4965_apm_init(struct iwl_priv *priv)
{
	int ret = 0;

	/* disable the L0s exit timer (per register bit name) */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* set "initialization complete" bit to move adapter
	 * D0U* --> D0A* state */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock stabilization (timeout arg 25000 -- units per
	 * iwl_poll_bit) */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO("Failed to init the card\n");
		goto out;
	}

	ret = iwl_grab_nic_access(priv);
	if (ret)
		goto out;

	/* enable DMA */
	iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
		       APMG_CLK_VAL_BSM_CLK_RQT);

	udelay(20);

	/* disable L1-Active */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);
out:
	return ret;
}
422
/* Apply per-device PCI and EEPROM-derived hardware configuration. */
static void iwl4965_nic_config(struct iwl_priv *priv)
{
	unsigned long flags;
	u32 val;
	u16 radio_cfg;
	u8 val_link;

	spin_lock_irqsave(&priv->lock, flags);

	/* Workaround for early hardware steppings (flag bit 0x80 set,
	 * stepping < 8): rewrite PCI_REG_WUM8 with bit 11 cleared.
	 * NOTE(review): the comment says "Enable No Snoop" but the write
	 * clears bit 11 -- confirm the bit polarity against the register
	 * definition. */
	if ((priv->rev_id & 0x80) == 0x80 && (priv->rev_id & 0x7f) < 8) {
		pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
		/* Enable No Snoop field */
		pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
				       val & ~(1 << 11));
	}

	pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);

	/* L1 is enabled by BIOS */
	if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN)
		/* L0S disabled, L1A enabled
		 * NOTE(review): sets a bit named ..._L0S_ENABLED here;
		 * comment and bit name disagree -- confirm. */
		iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		/* L0S enabled, L1A disabled */
		iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);

	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

	/* write radio config values to register
	 * NOTE(review): programmed only when RF type equals
	 * EEPROM_4965_RF_CFG_TYPE_MAX -- verify the comparison is intended */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			    EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	/* cache pointer to the EEPROM's tx-power calibration block */
	priv->calib_info = (struct iwl_eeprom_calib_info *)
		iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);

	spin_unlock_irqrestore(&priv->lock, flags);
}
469
Tomas Winkler46315e02008-05-29 16:34:59 +0800470static int iwl4965_apm_stop_master(struct iwl_priv *priv)
471{
472 int ret = 0;
473 unsigned long flags;
474
475 spin_lock_irqsave(&priv->lock, flags);
476
477 /* set stop master bit */
478 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
479
480 ret = iwl_poll_bit(priv, CSR_RESET,
481 CSR_RESET_REG_FLAG_MASTER_DISABLED,
482 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
483 if (ret < 0)
484 goto out;
485
486out:
487 spin_unlock_irqrestore(&priv->lock, flags);
488 IWL_DEBUG_INFO("stop master\n");
489
490 return ret;
491}
492
/* Stop the device's APM: halt the DMA master, then soft-reset. */
static void iwl4965_apm_stop(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl4965_apm_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* let the soft reset take effect */
	udelay(10);

	/* NOTE(review): INIT_DONE is set (not cleared) after the reset;
	 * confirm this is the intended power-state sequence. */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	spin_unlock_irqrestore(&priv->lock, flags);
}
508
/* Full APM reset: stop the DMA master, soft-reset the device, then
 * re-enable DMA/BSM clocks and disable L1-Active.  Also clears any
 * in-flight host command so waiters can retry.  Returns 0 on success. */
static int iwl4965_apm_reset(struct iwl_priv *priv)
{
	int ret = 0;
	unsigned long flags;

	iwl4965_apm_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* let the reset propagate */
	udelay(10);

	/* FIXME: put here L1A -L0S w/a */

	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* NOTE(review): polls CSR_RESET using CSR_GP_CNTRL clock-ready
	 * flags -- looks like it may have been meant to poll CSR_GP_CNTRL;
	 * confirm against later driver versions before changing. */
	ret = iwl_poll_bit(priv, CSR_RESET,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);

	if (ret)
		goto out;

	udelay(10);

	ret = iwl_grab_nic_access(priv);
	if (ret)
		goto out;
	/* Enable DMA and BSM Clock */
	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
		       APMG_CLK_VAL_BSM_CLK_RQT);

	udelay(10);

	/* disable L1A */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);

	/* allow a pending host command to be retried after the reset */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
	wake_up_interruptible(&priv->wait_command_queue);

out:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}
558
Zhu Yib481de92007-09-25 17:54:57 -0700559/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
560 * Called after every association, but this runs only once!
561 * ... once chain noise is calibrated the first time, it's good forever. */
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -0700562static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700563{
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -0700564 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
Zhu Yib481de92007-09-25 17:54:57 -0700565
Tomas Winkler3109ece2008-03-28 16:33:35 -0700566 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800567 struct iwl4965_calibration_cmd cmd;
Zhu Yib481de92007-09-25 17:54:57 -0700568
569 memset(&cmd, 0, sizeof(cmd));
570 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
571 cmd.diff_gain_a = 0;
572 cmd.diff_gain_b = 0;
573 cmd.diff_gain_c = 0;
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -0700574 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
575 sizeof(cmd), &cmd))
576 IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
Zhu Yib481de92007-09-25 17:54:57 -0700577 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
578 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
579 }
Zhu Yib481de92007-09-25 17:54:57 -0700580}
581
/* Chain-noise calibration: from per-chain average noise, derive per-chain
 * differential gain codes (relative to the quietest chain) and send them
 * to the uCode -- the radio write happens only once per calibration. */
static void iwl4965_gain_computation(struct iwl_priv *priv,
				     u32 *average_noise,
				     u16 min_average_noise_antenna_i,
				     u32 min_average_noise)
{
	int i, ret;
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;

	/* the quietest chain is the reference: no adjustment */
	data->delta_gain_code[min_average_noise_antenna_i] = 0;

	for (i = 0; i < NUM_RX_CHAINS; i++) {
		s32 delta_g = 0;

		/* only adjust connected chains still at their init value */
		if (!(data->disconn_array[i]) &&
		    (data->delta_gain_code[i] ==
		     CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
			delta_g = average_noise[i] - min_average_noise;
			/* scale the noise delta (x10/15) into a gain code,
			 * clamped to the maximum allowed code */
			data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
			data->delta_gain_code[i] =
				min(data->delta_gain_code[i],
				    (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

			/* bit 2 is OR-ed into the final code -- presumably a
			 * sign/direction flag; TODO confirm its meaning */
			data->delta_gain_code[i] =
				(data->delta_gain_code[i] | (1 << 2));
		} else {
			data->delta_gain_code[i] = 0;
		}
	}
	IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
			data->delta_gain_code[0],
			data->delta_gain_code[1],
			data->delta_gain_code[2]);

	/* Differential gain gets sent to uCode only once */
	if (!data->radio_write) {
		struct iwl4965_calibration_cmd cmd;
		data->radio_write = 1;

		memset(&cmd, 0, sizeof(cmd));
		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = data->delta_gain_code[0];
		cmd.diff_gain_b = data->delta_gain_code[1];
		cmd.diff_gain_c = data->delta_gain_code[2];
		ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				       sizeof(cmd), &cmd);
		if (ret)
			IWL_DEBUG_CALIB("fail sending cmd "
					"REPLY_PHY_CALIBRATION_CMD \n");

		/* TODO we might want recalculate
		 * rx_chain in rxon cmd */

		/* Mark so we run this algo only once! */
		data->state = IWL_CHAIN_NOISE_CALIBRATED;
	}
	/* reset accumulators for the next calibration round */
	data->chain_noise_a = 0;
	data->chain_noise_b = 0;
	data->chain_noise_c = 0;
	data->chain_signal_a = 0;
	data->chain_signal_b = 0;
	data->chain_signal_c = 0;
	data->beacon_count = 0;
}
645
Emmanuel Grumbacha326a5d2008-07-11 11:53:31 +0800646static void iwl4965_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
647 __le32 *tx_flags)
648{
649 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
650 *tx_flags |= TX_CMD_FLG_RTS_MSK;
651 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
652 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
653 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
654 *tx_flags |= TX_CMD_FLG_CTS_MSK;
655 }
656}
657
Zhu Yib481de92007-09-25 17:54:57 -0700658static void iwl4965_bg_txpower_work(struct work_struct *work)
659{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700660 struct iwl_priv *priv = container_of(work, struct iwl_priv,
Zhu Yib481de92007-09-25 17:54:57 -0700661 txpower_work);
662
663 /* If a scan happened to start before we got here
664 * then just return; the statistics notification will
665 * kick off another scheduled work to compensate for
666 * any temperature delta we missed here. */
667 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
668 test_bit(STATUS_SCANNING, &priv->status))
669 return;
670
671 mutex_lock(&priv->mutex);
672
673 /* Regardless of if we are assocaited, we must reconfigure the
674 * TX power since frames can be sent on non-radar channels while
675 * not associated */
Tomas Winkler630fe9b2008-06-12 09:47:08 +0800676 iwl4965_send_tx_power(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700677
678 /* Update last_temperature to keep is_calib_needed from running
679 * when it isn't needed... */
680 priv->last_temperature = priv->temperature;
681
682 mutex_unlock(&priv->mutex);
683}
684
685/*
686 * Acquire priv->lock before calling this function !
687 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700688static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
Zhu Yib481de92007-09-25 17:54:57 -0700689{
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700690 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
Zhu Yib481de92007-09-25 17:54:57 -0700691 (index & 0xff) | (txq_id << 8));
Tomas Winkler12a81f62008-04-03 16:05:20 -0700692 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
Zhu Yib481de92007-09-25 17:54:57 -0700693}
694
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800695/**
696 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
697 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
698 * @scd_retry: (1) Indicates queue will be used in aggregation mode
699 *
700 * NOTE: Acquire priv->lock before calling this function !
Zhu Yib481de92007-09-25 17:54:57 -0700701 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700702static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
Ron Rindjunsky16466902008-05-05 10:22:50 +0800703 struct iwl_tx_queue *txq,
Zhu Yib481de92007-09-25 17:54:57 -0700704 int tx_fifo_id, int scd_retry)
705{
706 int txq_id = txq->q.id;
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800707
708 /* Find out whether to activate Tx queue */
Zhu Yib481de92007-09-25 17:54:57 -0700709 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
710
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800711 /* Set up and activate */
Tomas Winkler12a81f62008-04-03 16:05:20 -0700712 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
Emmanuel Grumbach038669e2008-04-23 17:15:04 -0700713 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
714 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
715 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
716 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
717 IWL49_SCD_QUEUE_STTS_REG_MSK);
Zhu Yib481de92007-09-25 17:54:57 -0700718
719 txq->sched_retry = scd_retry;
720
721 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800722 active ? "Activate" : "Deactivate",
Zhu Yib481de92007-09-25 17:54:57 -0700723 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
724}
725
/* Default mapping of driver Tx queue index to hardware Tx DMA/FIFO
 * channel: queues 0-3 feed the AC FIFOs (AC3..AC0), queue 4 the command
 * FIFO, queues 5-6 the two HCCA FIFOs. */
static const u16 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL49_CMD_FIFO_NUM,
	IWL_TX_FIFO_HCCA_1,
	IWL_TX_FIFO_HCCA_2
};
735
Emmanuel Grumbachbe1f3ab62008-06-12 09:47:18 +0800736static int iwl4965_alive_notify(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700737{
738 u32 a;
739 int i = 0;
740 unsigned long flags;
Tomas Winkler857485c2008-03-21 13:53:44 -0700741 int ret;
Zhu Yib481de92007-09-25 17:54:57 -0700742
743 spin_lock_irqsave(&priv->lock, flags);
744
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700745 ret = iwl_grab_nic_access(priv);
Tomas Winkler857485c2008-03-21 13:53:44 -0700746 if (ret) {
Zhu Yib481de92007-09-25 17:54:57 -0700747 spin_unlock_irqrestore(&priv->lock, flags);
Tomas Winkler857485c2008-03-21 13:53:44 -0700748 return ret;
Zhu Yib481de92007-09-25 17:54:57 -0700749 }
750
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800751 /* Clear 4965's internal Tx Scheduler data base */
Tomas Winkler12a81f62008-04-03 16:05:20 -0700752 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
Emmanuel Grumbach038669e2008-04-23 17:15:04 -0700753 a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
754 for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700755 iwl_write_targ_mem(priv, a, 0);
Emmanuel Grumbach038669e2008-04-23 17:15:04 -0700756 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700757 iwl_write_targ_mem(priv, a, 0);
Tomas Winkler5425e492008-04-15 16:01:38 -0700758 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700759 iwl_write_targ_mem(priv, a, 0);
Zhu Yib481de92007-09-25 17:54:57 -0700760
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800761 /* Tel 4965 where to find Tx byte count tables */
Tomas Winkler12a81f62008-04-03 16:05:20 -0700762 iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
Tomas Winkler059ff822008-04-14 21:16:14 -0700763 (priv->shared_phys +
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800764 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800765
766 /* Disable chain mode for all queues */
Tomas Winkler12a81f62008-04-03 16:05:20 -0700767 iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
Zhu Yib481de92007-09-25 17:54:57 -0700768
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800769 /* Initialize each Tx queue (including the command queue) */
Tomas Winkler5425e492008-04-15 16:01:38 -0700770 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800771
772 /* TFD circular buffer read/write indexes */
Tomas Winkler12a81f62008-04-03 16:05:20 -0700773 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700774 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800775
776 /* Max Tx Window size for Scheduler-ACK mode */
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700777 iwl_write_targ_mem(priv, priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -0700778 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
779 (SCD_WIN_SIZE <<
780 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
781 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800782
783 /* Frame limit */
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700784 iwl_write_targ_mem(priv, priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -0700785 IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
786 sizeof(u32),
787 (SCD_FRAME_LIMIT <<
788 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
789 IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
Zhu Yib481de92007-09-25 17:54:57 -0700790
791 }
Tomas Winkler12a81f62008-04-03 16:05:20 -0700792 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
Tomas Winkler5425e492008-04-15 16:01:38 -0700793 (1 << priv->hw_params.max_txq_num) - 1);
Zhu Yib481de92007-09-25 17:54:57 -0700794
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800795 /* Activate all Tx DMA/FIFO channels */
Tomas Winklerda1bc452008-05-29 16:35:00 +0800796 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
Zhu Yib481de92007-09-25 17:54:57 -0700797
798 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800799
800 /* Map each Tx/cmd queue to its corresponding fifo */
Zhu Yib481de92007-09-25 17:54:57 -0700801 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
802 int ac = default_queue_to_tx_fifo[i];
Ron Rindjunsky36470742008-05-15 13:54:10 +0800803 iwl_txq_ctx_activate(priv, i);
Zhu Yib481de92007-09-25 17:54:57 -0700804 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
805 }
806
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700807 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700808 spin_unlock_irqrestore(&priv->lock, flags);
809
Tomas Winkler857485c2008-03-21 13:53:44 -0700810 return ret;
Zhu Yib481de92007-09-25 17:54:57 -0700811}
812
/* 4965 sensitivity calibration thresholds (auto-correlation and energy
 * detect, CCK and OFDM).  Installed into priv->hw_params.sens by
 * iwl4965_hw_set_hw_params() for use by the runtime sensitivity code. */
static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,
};
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -0700835
/**
 * iwl4965_hw_set_hw_params - set up 4965-specific hardware parameters
 *
 * Called when initializing driver.  Validates the module-parameter queue
 * count, then fills priv->hw_params with 4965 capacities (queues,
 * stations, ucode sizes, antenna configuration, thermal kill threshold,
 * sensitivity table).
 *
 * Returns 0 on success, -EINVAL if num_of_queues is out of range.
 */
static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
{

	/* Reject a num_of_queues module parameter outside the range the
	 * 4965 scheduler supports. */
	if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) ||
	    (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
		IWL_ERROR("invalid queues_num, should be between %d and %d\n",
			  IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
		return -EINVAL;
	}

	priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
	priv->hw_params.first_ampdu_q = IWL49_FIRST_AMPDU_QUEUE;
	priv->hw_params.max_stations = IWL4965_STATION_COUNT;
	priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
	/* uCode runtime/instruction/BSM SRAM sizes */
	priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
	priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
	priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	/* FAT (40 MHz) channels only supported on the 5 GHz band */
	priv->hw_params.fat_channel = BIT(IEEE80211_BAND_5GHZ);

	/* 4965 has two Tx and two Rx chains, antennas A and B */
	priv->hw_params.tx_chains_num = 2;
	priv->hw_params.rx_chains_num = 2;
	priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
	priv->hw_params.valid_rx_ant = ANT_A | ANT_B;
	priv->hw_params.ct_kill_threshold = CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);

	priv->hw_params.sens = &iwl4965_sensitivity;

	return 0;
}
870
Mohamed Abbas5da4b552008-04-21 15:41:51 -0700871/* set card power command */
872static int iwl4965_set_power(struct iwl_priv *priv,
873 void *cmd)
874{
875 int ret = 0;
876
877 ret = iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
878 sizeof(struct iwl4965_powertable_cmd),
879 cmd, NULL);
880 return ret;
881}
Zhu Yib481de92007-09-25 17:54:57 -0700882
883static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
884{
885 s32 sign = 1;
886
887 if (num < 0) {
888 sign = -sign;
889 num = -num;
890 }
891 if (denom < 0) {
892 sign = -sign;
893 denom = -denom;
894 }
895 *res = 1;
896 *res = ((num * 2 + denom) / (denom * 2)) * sign;
897
898 return 1;
899}
900
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800901/**
902 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
903 *
904 * Determines power supply voltage compensation for txpower calculations.
905 * Returns number of 1/2-dB steps to subtract from gain table index,
906 * to compensate for difference between power supply voltage during
907 * factory measurements, vs. current power supply voltage.
908 *
909 * Voltage indication is higher for lower voltage.
910 * Lower voltage requires more gain (lower gain table index).
911 */
Zhu Yib481de92007-09-25 17:54:57 -0700912static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
913 s32 current_voltage)
914{
915 s32 comp = 0;
916
917 if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
918 (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
919 return 0;
920
921 iwl4965_math_div_round(current_voltage - eeprom_voltage,
922 TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
923
924 if (current_voltage > eeprom_voltage)
925 comp *= 2;
926 if ((comp < -2) || (comp > 2))
927 comp = 0;
928
929 return comp;
930}
931
Zhu Yib481de92007-09-25 17:54:57 -0700932static s32 iwl4965_get_tx_atten_grp(u16 channel)
933{
934 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
935 channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
936 return CALIB_CH_GROUP_5;
937
938 if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
939 channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
940 return CALIB_CH_GROUP_1;
941
942 if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
943 channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
944 return CALIB_CH_GROUP_2;
945
946 if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
947 channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
948 return CALIB_CH_GROUP_3;
949
950 if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
951 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
952 return CALIB_CH_GROUP_4;
953
954 IWL_ERROR("Can't find txatten group for channel %d.\n", channel);
955 return -1;
956}
957
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700958static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
Zhu Yib481de92007-09-25 17:54:57 -0700959{
960 s32 b = -1;
961
962 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
Tomas Winkler073d3f52008-04-21 15:41:52 -0700963 if (priv->calib_info->band_info[b].ch_from == 0)
Zhu Yib481de92007-09-25 17:54:57 -0700964 continue;
965
Tomas Winkler073d3f52008-04-21 15:41:52 -0700966 if ((channel >= priv->calib_info->band_info[b].ch_from)
967 && (channel <= priv->calib_info->band_info[b].ch_to))
Zhu Yib481de92007-09-25 17:54:57 -0700968 break;
969 }
970
971 return b;
972}
973
974static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
975{
976 s32 val;
977
978 if (x2 == x1)
979 return y1;
980 else {
981 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
982 return val + y2;
983 }
984}
985
/**
 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
 *
 * Interpolates factory measurements from the two sample channels within a
 * sub-band, to apply to channel of interest.  Interpolation is proportional to
 * differences in channel frequencies, which is proportional to differences
 * in channel number.
 *
 * Fills @chan_info with per-chain, per-measurement interpolated values
 * (actual power, gain index, temperature, PA detector).  Returns 0 on
 * success, -1 if the channel belongs to no calibration sub-band.
 */
static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
				    struct iwl_eeprom_calib_ch_info *chan_info)
{
	s32 s = -1;
	u32 c;	/* Tx chain index */
	u32 m;	/* measurement index within a chain */
	const struct iwl_eeprom_calib_measure *m1;
	const struct iwl_eeprom_calib_measure *m2;
	struct iwl_eeprom_calib_measure *omeas;
	u32 ch_i1;	/* first factory-calibrated sample channel */
	u32 ch_i2;	/* second factory-calibrated sample channel */

	s = iwl4965_get_sub_band(priv, channel);
	if (s >= EEPROM_TX_POWER_BANDS) {
		IWL_ERROR("Tx Power can not find channel %d ", channel);
		return -1;
	}

	ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
	ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
	chan_info->ch_num = (u8) channel;

	IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
			  channel, s, ch_i1, ch_i2);

	/* Interpolate each measurement of each chain between the two
	 * factory sample channels, proportional to channel number. */
	for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
		for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
			m1 = &(priv->calib_info->band_info[s].ch1.
			       measurements[c][m]);
			m2 = &(priv->calib_info->band_info[s].ch2.
			       measurements[c][m]);
			omeas = &(chan_info->measurements[c][m]);

			omeas->actual_pow =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->actual_pow,
							   ch_i2,
							   m2->actual_pow);
			omeas->gain_idx =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->gain_idx, ch_i2,
							   m2->gain_idx);
			omeas->temperature =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->temperature,
							   ch_i2,
							   m2->temperature);
			/* pa_det is signed, unlike the fields above */
			omeas->pa_det =
			    (s8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->pa_det, ch_i2,
							   m2->pa_det);

			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
			     m1->actual_pow, m2->actual_pow, omeas->actual_pow);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
			     m1->gain_idx, m2->gain_idx, omeas->gain_idx);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
			     m1->pa_det, m2->pa_det, omeas->pa_det);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
			     m1->temperature, m2->temperature,
			     omeas->temperature);
		}
	}

	return 0;
}
1064
1065/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
1066 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
1067static s32 back_off_table[] = {
1068 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
1069 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
1070 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
1071 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
1072 10 /* CCK */
1073};
1074
1075/* Thermal compensation values for txpower for various frequency ranges ...
1076 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001077static struct iwl4965_txpower_comp_entry {
Zhu Yib481de92007-09-25 17:54:57 -07001078 s32 degrees_per_05db_a;
1079 s32 degrees_per_05db_a_denom;
1080} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
1081 {9, 2}, /* group 0 5.2, ch 34-43 */
1082 {4, 1}, /* group 1 5.2, ch 44-70 */
1083 {4, 1}, /* group 2 5.2, ch 71-124 */
1084 {4, 1}, /* group 3 5.2, ch 125-200 */
1085 {3, 1} /* group 4 2.4, ch all */
1086};
1087
1088static s32 get_min_power_index(s32 rate_power_index, u32 band)
1089{
1090 if (!band) {
1091 if ((rate_power_index & 7) <= 4)
1092 return MIN_TX_GAIN_INDEX_52GHZ_EXT;
1093 }
1094 return MIN_TX_GAIN_INDEX;
1095}
1096
/* One txpower gain-table entry: written into the txpower command as
 * dsp_predis_atten (dsp) and radio_tx_gain (radio). */
struct gain_entry {
	u8 dsp;
	u8 radio;
};
1101
/* Gain index tables, one row per band (row 0 = 5.2 GHz, row 1 = 2.4 GHz).
 * Index 0 is the highest txpower; increasing indices give progressively
 * lower power, stepping the radio gain and DSP attenuation together. */
static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain index table */
	{
	 {123, 0x3F},		/* highest txpower */
	 {117, 0x3F},
	 {110, 0x3F},
	 {104, 0x3F},
	 {98, 0x3F},
	 {110, 0x3E},
	 {104, 0x3E},
	 {98, 0x3E},
	 {110, 0x3D},
	 {104, 0x3D},
	 {98, 0x3D},
	 {110, 0x3C},
	 {104, 0x3C},
	 {98, 0x3C},
	 {110, 0x3B},
	 {104, 0x3B},
	 {98, 0x3B},
	 {110, 0x3A},
	 {104, 0x3A},
	 {98, 0x3A},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x25},
	 {104, 0x25},
	 {98, 0x25},
	 {110, 0x24},
	 {104, 0x24},
	 {98, 0x24},
	 {110, 0x23},
	 {104, 0x23},
	 {98, 0x23},
	 {110, 0x22},
	 {104, 0x18},
	 {98, 0x18},
	 {110, 0x17},
	 {104, 0x17},
	 {98, 0x17},
	 {110, 0x16},
	 {104, 0x16},
	 {98, 0x16},
	 {110, 0x15},
	 {104, 0x15},
	 {98, 0x15},
	 {110, 0x14},
	 {104, 0x14},
	 {98, 0x14},
	 {110, 0x13},
	 {104, 0x13},
	 {98, 0x13},
	 {110, 0x12},
	 {104, 0x08},
	 {98, 0x08},
	 {110, 0x07},
	 {104, 0x07},
	 {98, 0x07},
	 {110, 0x06},
	 {104, 0x06},
	 {98, 0x06},
	 {110, 0x05},
	 {104, 0x05},
	 {98, 0x05},
	 {110, 0x04},
	 {104, 0x04},
	 {98, 0x04},
	 {110, 0x03},
	 {104, 0x03},
	 {98, 0x03},
	 {110, 0x02},
	 {104, 0x02},
	 {98, 0x02},
	 {110, 0x01},
	 {104, 0x01},
	 {98, 0x01},
	 {110, 0x00},
	 {104, 0x00},
	 {98, 0x00},
	 {93, 0x00},
	 {88, 0x00},
	 {83, 0x00},
	 {78, 0x00},
	 },
	/* 2.4GHz power gain index table */
	{
	 {110, 0x3f},		/* highest txpower */
	 {104, 0x3f},
	 {98, 0x3f},
	 {110, 0x3e},
	 {104, 0x3e},
	 {98, 0x3e},
	 {110, 0x3d},
	 {104, 0x3d},
	 {98, 0x3d},
	 {110, 0x3c},
	 {104, 0x3c},
	 {98, 0x3c},
	 {110, 0x3b},
	 {104, 0x3b},
	 {98, 0x3b},
	 {110, 0x3a},
	 {104, 0x3a},
	 {98, 0x3a},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x6},
	 {104, 0x6},
	 {98, 0x6},
	 {110, 0x5},
	 {104, 0x5},
	 {98, 0x5},
	 {110, 0x4},
	 {104, 0x4},
	 {98, 0x4},
	 {110, 0x3},
	 {104, 0x3},
	 {98, 0x3},
	 {110, 0x2},
	 {104, 0x2},
	 {98, 0x2},
	 {110, 0x1},
	 {104, 0x1},
	 {98, 0x1},
	 {110, 0x0},
	 {104, 0x0},
	 {98, 0x0},
	 {97, 0},
	 {96, 0},
	 {95, 0},
	 {94, 0},
	 {93, 0},
	 {92, 0},
	 {91, 0},
	 {90, 0},
	 {89, 0},
	 {88, 0},
	 {87, 0},
	 {86, 0},
	 {85, 0},
	 {84, 0},
	 {83, 0},
	 {82, 0},
	 {81, 0},
	 {80, 0},
	 {79, 0},
	 {78, 0},
	 {77, 0},
	 {76, 0},
	 {75, 0},
	 {74, 0},
	 {73, 0},
	 {72, 0},
	 {71, 0},
	 {70, 0},
	 {69, 0},
	 {68, 0},
	 {67, 0},
	 {66, 0},
	 {65, 0},
	 {64, 0},
	 {63, 0},
	 {62, 0},
	 {61, 0},
	 {60, 0},
	 {59, 0},
	 }
};
1326
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001327static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
Zhu Yib481de92007-09-25 17:54:57 -07001328 u8 is_fat, u8 ctrl_chan_high,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001329 struct iwl4965_tx_power_db *tx_power_tbl)
Zhu Yib481de92007-09-25 17:54:57 -07001330{
1331 u8 saturation_power;
1332 s32 target_power;
1333 s32 user_target_power;
1334 s32 power_limit;
1335 s32 current_temp;
1336 s32 reg_limit;
1337 s32 current_regulatory;
1338 s32 txatten_grp = CALIB_CH_GROUP_MAX;
1339 int i;
1340 int c;
Assaf Kraussbf85ea42008-03-14 10:38:49 -07001341 const struct iwl_channel_info *ch_info = NULL;
Tomas Winkler073d3f52008-04-21 15:41:52 -07001342 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
1343 const struct iwl_eeprom_calib_measure *measurement;
Zhu Yib481de92007-09-25 17:54:57 -07001344 s16 voltage;
1345 s32 init_voltage;
1346 s32 voltage_compensation;
1347 s32 degrees_per_05db_num;
1348 s32 degrees_per_05db_denom;
1349 s32 factory_temp;
1350 s32 temperature_comp[2];
1351 s32 factory_gain_index[2];
1352 s32 factory_actual_pwr[2];
1353 s32 power_index;
1354
Zhu Yib481de92007-09-25 17:54:57 -07001355 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
1356 * are used for indexing into txpower table) */
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001357 user_target_power = 2 * priv->tx_power_user_lmt;
Zhu Yib481de92007-09-25 17:54:57 -07001358
1359 /* Get current (RXON) channel, band, width */
Zhu Yib481de92007-09-25 17:54:57 -07001360 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
1361 is_fat);
1362
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001363 ch_info = iwl_get_channel_info(priv, priv->band, channel);
1364
1365 if (!is_channel_valid(ch_info))
Zhu Yib481de92007-09-25 17:54:57 -07001366 return -EINVAL;
1367
1368 /* get txatten group, used to select 1) thermal txpower adjustment
1369 * and 2) mimo txpower balance between Tx chains. */
1370 txatten_grp = iwl4965_get_tx_atten_grp(channel);
1371 if (txatten_grp < 0)
1372 return -EINVAL;
1373
1374 IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n",
1375 channel, txatten_grp);
1376
1377 if (is_fat) {
1378 if (ctrl_chan_high)
1379 channel -= 2;
1380 else
1381 channel += 2;
1382 }
1383
1384 /* hardware txpower limits ...
1385 * saturation (clipping distortion) txpowers are in half-dBm */
1386 if (band)
Tomas Winkler073d3f52008-04-21 15:41:52 -07001387 saturation_power = priv->calib_info->saturation_power24;
Zhu Yib481de92007-09-25 17:54:57 -07001388 else
Tomas Winkler073d3f52008-04-21 15:41:52 -07001389 saturation_power = priv->calib_info->saturation_power52;
Zhu Yib481de92007-09-25 17:54:57 -07001390
1391 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
1392 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
1393 if (band)
1394 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
1395 else
1396 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
1397 }
1398
1399 /* regulatory txpower limits ... reg_limit values are in half-dBm,
1400 * max_power_avg values are in dBm, convert * 2 */
1401 if (is_fat)
1402 reg_limit = ch_info->fat_max_power_avg * 2;
1403 else
1404 reg_limit = ch_info->max_power_avg * 2;
1405
1406 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
1407 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
1408 if (band)
1409 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
1410 else
1411 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
1412 }
1413
1414 /* Interpolate txpower calibration values for this channel,
1415 * based on factory calibration tests on spaced channels. */
1416 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
1417
1418 /* calculate tx gain adjustment based on power supply voltage */
Tomas Winkler073d3f52008-04-21 15:41:52 -07001419 voltage = priv->calib_info->voltage;
Zhu Yib481de92007-09-25 17:54:57 -07001420 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
1421 voltage_compensation =
1422 iwl4965_get_voltage_compensation(voltage, init_voltage);
1423
1424 IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n",
1425 init_voltage,
1426 voltage, voltage_compensation);
1427
1428 /* get current temperature (Celsius) */
1429 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
1430 current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
1431 current_temp = KELVIN_TO_CELSIUS(current_temp);
1432
1433 /* select thermal txpower adjustment params, based on channel group
1434 * (same frequency group used for mimo txatten adjustment) */
1435 degrees_per_05db_num =
1436 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
1437 degrees_per_05db_denom =
1438 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
1439
1440 /* get per-chain txpower values from factory measurements */
1441 for (c = 0; c < 2; c++) {
1442 measurement = &ch_eeprom_info.measurements[c][1];
1443
1444 /* txgain adjustment (in half-dB steps) based on difference
1445 * between factory and current temperature */
1446 factory_temp = measurement->temperature;
1447 iwl4965_math_div_round((current_temp - factory_temp) *
1448 degrees_per_05db_denom,
1449 degrees_per_05db_num,
1450 &temperature_comp[c]);
1451
1452 factory_gain_index[c] = measurement->gain_idx;
1453 factory_actual_pwr[c] = measurement->actual_pow;
1454
1455 IWL_DEBUG_TXPOWER("chain = %d\n", c);
1456 IWL_DEBUG_TXPOWER("fctry tmp %d, "
1457 "curr tmp %d, comp %d steps\n",
1458 factory_temp, current_temp,
1459 temperature_comp[c]);
1460
1461 IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n",
1462 factory_gain_index[c],
1463 factory_actual_pwr[c]);
1464 }
1465
1466 /* for each of 33 bit-rates (including 1 for CCK) */
1467 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
1468 u8 is_mimo_rate;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001469 union iwl4965_tx_power_dual_stream tx_power;
Zhu Yib481de92007-09-25 17:54:57 -07001470
1471 /* for mimo, reduce each chain's txpower by half
1472 * (3dB, 6 steps), so total output power is regulatory
1473 * compliant. */
1474 if (i & 0x8) {
1475 current_regulatory = reg_limit -
1476 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1477 is_mimo_rate = 1;
1478 } else {
1479 current_regulatory = reg_limit;
1480 is_mimo_rate = 0;
1481 }
1482
1483 /* find txpower limit, either hardware or regulatory */
1484 power_limit = saturation_power - back_off_table[i];
1485 if (power_limit > current_regulatory)
1486 power_limit = current_regulatory;
1487
1488 /* reduce user's txpower request if necessary
1489 * for this rate on this channel */
1490 target_power = user_target_power;
1491 if (target_power > power_limit)
1492 target_power = power_limit;
1493
1494 IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n",
1495 i, saturation_power - back_off_table[i],
1496 current_regulatory, user_target_power,
1497 target_power);
1498
1499 /* for each of 2 Tx chains (radio transmitters) */
1500 for (c = 0; c < 2; c++) {
1501 s32 atten_value;
1502
1503 if (is_mimo_rate)
1504 atten_value =
1505 (s32)le32_to_cpu(priv->card_alive_init.
1506 tx_atten[txatten_grp][c]);
1507 else
1508 atten_value = 0;
1509
1510 /* calculate index; higher index means lower txpower */
1511 power_index = (u8) (factory_gain_index[c] -
1512 (target_power -
1513 factory_actual_pwr[c]) -
1514 temperature_comp[c] -
1515 voltage_compensation +
1516 atten_value);
1517
1518/* IWL_DEBUG_TXPOWER("calculated txpower index %d\n",
1519 power_index); */
1520
1521 if (power_index < get_min_power_index(i, band))
1522 power_index = get_min_power_index(i, band);
1523
1524 /* adjust 5 GHz index to support negative indexes */
1525 if (!band)
1526 power_index += 9;
1527
1528 /* CCK, rate 32, reduce txpower for CCK */
1529 if (i == POWER_TABLE_CCK_ENTRY)
1530 power_index +=
1531 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
1532
1533 /* stay within the table! */
1534 if (power_index > 107) {
1535 IWL_WARNING("txpower index %d > 107\n",
1536 power_index);
1537 power_index = 107;
1538 }
1539 if (power_index < 0) {
1540 IWL_WARNING("txpower index %d < 0\n",
1541 power_index);
1542 power_index = 0;
1543 }
1544
1545 /* fill txpower command for this rate/chain */
1546 tx_power.s.radio_tx_gain[c] =
1547 gain_table[band][power_index].radio;
1548 tx_power.s.dsp_predis_atten[c] =
1549 gain_table[band][power_index].dsp;
1550
1551 IWL_DEBUG_TXPOWER("chain %d mimo %d index %d "
1552 "gain 0x%02x dsp %d\n",
1553 c, atten_value, power_index,
1554 tx_power.s.radio_tx_gain[c],
1555 tx_power.s.dsp_predis_atten[c]);
1556 }/* for each chain */
1557
1558 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1559
1560 }/* for each rate */
1561
1562 return 0;
1563}
1564
1565/**
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001566 * iwl4965_send_tx_power - Configure the TXPOWER level user limit
Zhu Yib481de92007-09-25 17:54:57 -07001567 *
1568 * Uses the active RXON for channel, band, and characteristics (fat, high)
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001569 * The power limit is taken from priv->tx_power_user_lmt.
Zhu Yib481de92007-09-25 17:54:57 -07001570 */
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001571static int iwl4965_send_tx_power(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001572{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001573 struct iwl4965_txpowertable_cmd cmd = { 0 };
Tomas Winkler857485c2008-03-21 13:53:44 -07001574 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001575 u8 band = 0;
1576 u8 is_fat = 0;
1577 u8 ctrl_chan_high = 0;
1578
1579 if (test_bit(STATUS_SCANNING, &priv->status)) {
1580 /* If this gets hit a lot, switch it to a BUG() and catch
1581 * the stack trace to find out who is calling this during
1582 * a scan. */
1583 IWL_WARNING("TX Power requested while scanning!\n");
1584 return -EAGAIN;
1585 }
1586
Johannes Berg8318d782008-01-24 19:38:38 +01001587 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07001588
1589 is_fat = is_fat_channel(priv->active_rxon.flags);
1590
1591 if (is_fat &&
1592 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1593 ctrl_chan_high = 1;
1594
1595 cmd.band = band;
1596 cmd.channel = priv->active_rxon.channel;
1597
Tomas Winkler857485c2008-03-21 13:53:44 -07001598 ret = iwl4965_fill_txpower_tbl(priv, band,
Zhu Yib481de92007-09-25 17:54:57 -07001599 le16_to_cpu(priv->active_rxon.channel),
1600 is_fat, ctrl_chan_high, &cmd.tx_power);
Tomas Winkler857485c2008-03-21 13:53:44 -07001601 if (ret)
1602 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07001603
Tomas Winkler857485c2008-03-21 13:53:44 -07001604 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1605
1606out:
1607 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001608}
1609
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001610static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
1611{
1612 int ret = 0;
1613 struct iwl4965_rxon_assoc_cmd rxon_assoc;
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08001614 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1615 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001616
1617 if ((rxon1->flags == rxon2->flags) &&
1618 (rxon1->filter_flags == rxon2->filter_flags) &&
1619 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1620 (rxon1->ofdm_ht_single_stream_basic_rates ==
1621 rxon2->ofdm_ht_single_stream_basic_rates) &&
1622 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1623 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1624 (rxon1->rx_chain == rxon2->rx_chain) &&
1625 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1626 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1627 return 0;
1628 }
1629
1630 rxon_assoc.flags = priv->staging_rxon.flags;
1631 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1632 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1633 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1634 rxon_assoc.reserved = 0;
1635 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1636 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1637 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1638 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1639 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1640
1641 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1642 sizeof(rxon_assoc), &rxon_assoc, NULL);
1643 if (ret)
1644 return ret;
1645
1646 return ret;
1647}
1648
1649
/**
 * iwl4965_hw_channel_switch - send REPLY_CHANNEL_SWITCH command to uCode
 * @priv: driver private data
 * @channel: target channel number
 *
 * Builds the channel-switch command from the active RXON (flags and
 * filter flags), the regulatory info for the target channel, and a
 * freshly computed Tx power table, then sends it synchronously.
 *
 * Returns 0 on success, or a negative error from the txpower-table
 * fill or the command-send path.
 */
int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
{
	int rc;
	u8 band = 0;
	u8 is_fat = 0;
	u8 ctrl_chan_high = 0;
	struct iwl4965_channel_switch_cmd cmd = { 0 };
	const struct iwl_channel_info *ch_info;

	/* band is 1 for 2.4 GHz, 0 otherwise */
	band = priv->band == IEEE80211_BAND_2GHZ;

	ch_info = iwl_get_channel_info(priv, priv->band, channel);

	is_fat = is_fat_channel(priv->staging_rxon.flags);

	/* FAT (40 MHz) with control channel in the upper half */
	if (is_fat &&
	    (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
		ctrl_chan_high = 1;

	cmd.band = band;
	cmd.expect_beacon = 0;
	cmd.channel = cpu_to_le16(channel);
	cmd.rxon_flags = priv->active_rxon.flags;
	cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
	cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
	/* Wait for a beacon before transmitting on radar channels;
	 * be conservative (expect one) when the channel is unknown. */
	if (ch_info)
		cmd.expect_beacon = is_channel_radar(ch_info);
	else
		cmd.expect_beacon = 1;

	/* Tx power table must match the new channel/FAT configuration */
	rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
				      ctrl_chan_high, &cmd.tx_power);
	if (rc) {
		IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc);
		return rc;
	}

	rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
	return rc;
}
1690
Ron Rindjunskyd67f5482008-05-05 10:22:49 +08001691static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001692{
Tomas Winkler059ff822008-04-14 21:16:14 -07001693 struct iwl4965_shared *s = priv->shared_virt;
1694 return le32_to_cpu(s->rb_closed) & 0xFFF;
Zhu Yib481de92007-09-25 17:54:57 -07001695}
1696
/**
 * iwl4965_hw_get_beacon_cmd - build a beacon Tx command in a host frame
 * @priv: driver private data
 * @frame: host frame buffer; its u.beacon member is filled in
 * @rate: PLCP rate code to transmit the beacon at
 *
 * Fills in the Tx command header (broadcast station, infinite lifetime),
 * copies the beacon frame body right after it, and sets length, rate and
 * Tx flags.  Returns the total number of bytes used (command + frame).
 */
unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
				       struct iwl_frame *frame, u8 rate)
{
	struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
	unsigned int frame_size;

	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Beacons go out via the broadcast station entry and never expire */
	tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Copy the 802.11 beacon frame into the space after the command,
	 * bounded by the remaining room in frame->u */
	frame_size = iwl4965_fill_beacon_frame(priv,
				tx_beacon_cmd->frame,
				iwl_bcast_addr,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));

	BUG_ON(frame_size > MAX_MPDU_SIZE);
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);

	/* NOTE(review): this condition presumably selects the CCK rates
	 * (1M plus every PLCP code >= the 2M code); confirm against the
	 * PLCP rate table before relying on it. */
	if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
		tx_beacon_cmd->tx.rate_n_flags =
			iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
	else
		tx_beacon_cmd->tx.rate_n_flags =
			iwl_hw_set_rate_n_flags(rate, 0);

	tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
				TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
	return (sizeof(*tx_beacon_cmd) + frame_size);
}
1728
Ron Rindjunsky399f49002008-04-23 17:14:56 -07001729static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
1730{
1731 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
1732 sizeof(struct iwl4965_shared),
1733 &priv->shared_phys);
1734 if (!priv->shared_virt)
1735 return -ENOMEM;
1736
1737 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));
1738
Ron Rindjunskyd67f5482008-05-05 10:22:49 +08001739 priv->rb_closed_offset = offsetof(struct iwl4965_shared, rb_closed);
1740
Ron Rindjunsky399f49002008-04-23 17:14:56 -07001741 return 0;
1742}
1743
1744static void iwl4965_free_shared_mem(struct iwl_priv *priv)
1745{
1746 if (priv->shared_virt)
1747 pci_free_consistent(priv->pci_dev,
1748 sizeof(struct iwl4965_shared),
1749 priv->shared_virt,
1750 priv->shared_phys);
1751}
1752
/**
 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 * @priv: driver private data
 * @txq: Tx queue whose write pointer names the entry to fill
 * @byte_cnt: payload byte count for the TFD at the write pointer
 */
static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
					    struct iwl_tx_queue *txq,
					    u16 byte_cnt)
{
	int len;
	int txq_id = txq->q.id;
	struct iwl4965_shared *shared_data = priv->shared_virt;

	/* length stored for the device includes CRC and delimiter */
	len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;

	/* Set up byte count within first 256 entries */
	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
		       tfd_offset[txq->q.write_ptr], byte_cnt, len);

	/* If within first 64 entries, duplicate at end */
	/* NOTE(review): presumably the scheduler reads a window of up to
	 * IWL49_MAX_WIN_SIZE entries that may wrap past the end of the
	 * table, hence the mirrored copy — confirm against 4965 SCD docs. */
	if (txq->q.write_ptr < IWL49_MAX_WIN_SIZE)
		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
			tfd_offset[IWL49_QUEUE_SIZE + txq->q.write_ptr],
			byte_cnt, len);
}
1776
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001777/**
Zhu Yib481de92007-09-25 17:54:57 -07001778 * sign_extend - Sign extend a value using specified bit as sign-bit
1779 *
1780 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
1781 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
1782 *
1783 * @param oper value to sign extend
1784 * @param index 0 based bit index (0<=index<32) to sign bit
1785 */
1786static s32 sign_extend(u32 oper, int index)
1787{
1788 u8 shift = 31 - index;
1789
1790 return (s32)(oper << shift) >> shift;
1791}
1792
/**
 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
 * @statistics: Provides the temperature reading from the uCode
 *
 * Combines the raw uCode temperature reading (or the R4 default from the
 * "initialize" ALIVE response, if no statistics arrived yet) with the
 * R1/R2/R3 calibration constants to produce a temperature in Kelvin.
 *
 * A return of <0 indicates bogus data in the statistics
 */
static int iwl4965_hw_get_temperature(const struct iwl_priv *priv)
{
	s32 temperature;
	s32 vt;		/* sign-extended raw temperature reading */
	s32 R1, R2, R3;	/* calibration constants from the ALIVE response */
	u32 R4;

	/* FAT (40 MHz) operation uses the second calibration set [1] */
	if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
	    (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
		IWL_DEBUG_TEMP("Running FAT temperature calibration\n");
		R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
		R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
		R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
		R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
	} else {
		IWL_DEBUG_TEMP("Running temperature calibration\n");
		R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
		R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
		R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
		R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
	}

	/*
	 * Temperature is only 23 bits, so sign extend out to 32.
	 *
	 * NOTE If we haven't received a statistics notification yet
	 * with an updated temperature, use R4 provided to us in the
	 * "initialize" ALIVE response.
	 */
	if (!test_bit(STATUS_TEMPERATURE, &priv->status))
		vt = sign_extend(R4, 23);
	else
		vt = sign_extend(
			le32_to_cpu(priv->statistics.general.temperature), 23);

	/* NOTE(review): the value printed after "R4:" is actually vt (the
	 * sign-extended raw reading), not the R4 calibration constant. */
	IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);

	/* R3 == R1 would divide by zero below */
	if (R3 == R1) {
		IWL_ERROR("Calibration conflict R1 == R3\n");
		return -1;
	}

	/* Calculate temperature in degrees Kelvin, adjust by 97%.
	 * Add offset to center the adjustment around 0 degrees Centigrade. */
	temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
	temperature /= (R3 - R1);
	temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;

	IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n",
			temperature, KELVIN_TO_CELSIUS(temperature));

	return temperature;
}
1852
1853/* Adjust Txpower only if temperature variance is greater than threshold. */
1854#define IWL_TEMPERATURE_THRESHOLD 3
1855
1856/**
1857 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
1858 *
1859 * If the temperature changed has changed sufficiently, then a recalibration
1860 * is needed.
1861 *
1862 * Assumes caller will replace priv->last_temperature once calibration
1863 * executed.
1864 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001865static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001866{
1867 int temp_diff;
1868
1869 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
1870 IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n");
1871 return 0;
1872 }
1873
1874 temp_diff = priv->temperature - priv->last_temperature;
1875
1876 /* get absolute value */
1877 if (temp_diff < 0) {
1878 IWL_DEBUG_POWER("Getting cooler, delta %d, \n", temp_diff);
1879 temp_diff = -temp_diff;
1880 } else if (temp_diff == 0)
1881 IWL_DEBUG_POWER("Same temp, \n");
1882 else
1883 IWL_DEBUG_POWER("Getting warmer, delta %d, \n", temp_diff);
1884
1885 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
1886 IWL_DEBUG_POWER("Thermal txpower calib not needed\n");
1887 return 0;
1888 }
1889
1890 IWL_DEBUG_POWER("Thermal txpower calib needed\n");
1891
1892 return 1;
1893}
1894
Zhu Yi52256402008-06-30 17:23:31 +08001895static void iwl4965_temperature_calib(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001896{
Zhu Yib481de92007-09-25 17:54:57 -07001897 s32 temp;
Zhu Yib481de92007-09-25 17:54:57 -07001898
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001899 temp = iwl4965_hw_get_temperature(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001900 if (temp < 0)
1901 return;
1902
1903 if (priv->temperature != temp) {
1904 if (priv->temperature)
1905 IWL_DEBUG_TEMP("Temperature changed "
1906 "from %dC to %dC\n",
1907 KELVIN_TO_CELSIUS(priv->temperature),
1908 KELVIN_TO_CELSIUS(temp));
1909 else
1910 IWL_DEBUG_TEMP("Temperature "
1911 "initialized to %dC\n",
1912 KELVIN_TO_CELSIUS(temp));
1913 }
1914
1915 priv->temperature = temp;
1916 set_bit(STATUS_TEMPERATURE, &priv->status);
1917
Emmanuel Grumbach203566f2008-06-12 09:46:54 +08001918 if (!priv->disable_tx_power_cal &&
1919 unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
1920 iwl4965_is_temp_calib_needed(priv))
Zhu Yib481de92007-09-25 17:54:57 -07001921 queue_work(priv->workqueue, &priv->txpower_work);
1922}
1923
/**
 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
 * @priv: driver private data
 * @txq_id: scheduler (SCD) queue to halt
 */
static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
					    u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(priv,
		IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
1937
/**
 * iwl4965_txq_agg_disable - tear down an aggregation Tx queue
 *
 * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
 * priv->lock must be held by the caller
 *
 * Returns 0 on success, -EINVAL for an out-of-range queue, or the error
 * from iwl_grab_nic_access().
 */
static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
				   u16 ssn_idx, u8 tx_fifo)
{
	int ret = 0;

	/* only the dedicated AMPDU queue range may be disabled here */
	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
		IWL_WARNING("queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL49_FIRST_AMPDU_QUEUE,
			IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
		return -EINVAL;
	}

	/* device registers are only reachable while NIC access is held */
	ret = iwl_grab_nic_access(priv);
	if (ret)
		return ret;

	/* halt the scheduler for this queue before reconfiguring it */
	iwl4965_tx_queue_stop_scheduler(priv, txq_id);

	/* remove the queue from the chain-building set */
	iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* reset both pointers to the given start sequence number */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* mask its interrupt, deactivate the context, mark inactive */
	iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	iwl_release_nic_access(priv);

	return 0;
}
1976
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001977/**
1978 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
1979 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001980static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
Zhu Yib481de92007-09-25 17:54:57 -07001981 u16 txq_id)
1982{
1983 u32 tbl_dw_addr;
1984 u32 tbl_dw;
1985 u16 scd_q2ratid;
1986
Tomas Winkler30e553e2008-05-29 16:35:16 +08001987 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
Zhu Yib481de92007-09-25 17:54:57 -07001988
1989 tbl_dw_addr = priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001990 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
Zhu Yib481de92007-09-25 17:54:57 -07001991
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001992 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
Zhu Yib481de92007-09-25 17:54:57 -07001993
1994 if (txq_id & 0x1)
1995 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1996 else
1997 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
1998
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001999 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
Zhu Yib481de92007-09-25 17:54:57 -07002000
2001 return 0;
2002}
2003
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02002004
/**
 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
 *
 * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
 * i.e. it must be one of the higher queues used for aggregation
 *
 * Returns 0 on success, -EINVAL for an out-of-range queue, or the error
 * from iwl_grab_nic_access().
 */
static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
				  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
	unsigned long flags;
	int ret;
	u16 ra_tid;

	/* only the dedicated AMPDU queue range may be used for aggregation */
	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) {
		IWL_WARNING("queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL49_FIRST_AMPDU_QUEUE,
			IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1);
		return -EINVAL;
	}

	/* unique receiver-address/TID key for the scheduler map */
	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	iwl_sta_modify_enable_tid_tx(priv, sta_id, tid);

	/* register writes below must happen under the lock + NIC access */
	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* Stop this Tx queue before configuring it */
	iwl4965_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(priv,
		priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
		(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	iwl_write_targ_mem(priv, priv->scd_base_addr +
		IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
		(SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
		& IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	/* enable this queue's scheduler interrupt */
	iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
2074
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02002075int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
2076 enum ieee80211_ampdu_mlme_action action,
2077 const u8 *addr, u16 tid, u16 *ssn)
2078{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002079 struct iwl_priv *priv = hw->priv;
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02002080 DECLARE_MAC_BUF(mac);
2081
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07002082 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
2083 print_mac(mac, addr), tid);
2084
Ron Rindjunsky49779292008-06-30 17:23:21 +08002085 if (!(priv->cfg->sku & IWL_SKU_N))
2086 return -EACCES;
2087
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02002088 switch (action) {
2089 case IEEE80211_AMPDU_RX_START:
2090 IWL_DEBUG_HT("start Rx\n");
Ron Rindjunsky0c705152008-06-30 17:23:12 +08002091 return iwl_rx_agg_start(priv, addr, tid, *ssn);
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02002092 case IEEE80211_AMPDU_RX_STOP:
2093 IWL_DEBUG_HT("stop Rx\n");
Ron Rindjunsky0c705152008-06-30 17:23:12 +08002094 return iwl_rx_agg_stop(priv, addr, tid);
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02002095 case IEEE80211_AMPDU_TX_START:
2096 IWL_DEBUG_HT("start Tx\n");
Tomas Winkler30e553e2008-05-29 16:35:16 +08002097 return iwl_tx_agg_start(priv, addr, tid, ssn);
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02002098 case IEEE80211_AMPDU_TX_STOP:
2099 IWL_DEBUG_HT("stop Tx\n");
Tomas Winkler30e553e2008-05-29 16:35:16 +08002100 return iwl_tx_agg_stop(priv, addr, tid);
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02002101 default:
2102 IWL_DEBUG_HT("unknown\n");
2103 return -EINVAL;
2104 break;
2105 }
2106 return 0;
2107}
Tomas Winkler133636d2008-05-05 10:22:34 +08002108
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08002109static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
2110{
2111 switch (cmd_id) {
2112 case REPLY_RXON:
2113 return (u16) sizeof(struct iwl4965_rxon_cmd);
2114 default:
2115 return len;
2116 }
2117}
2118
Tomas Winkler133636d2008-05-05 10:22:34 +08002119static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
2120{
2121 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
2122 addsta->mode = cmd->mode;
2123 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
2124 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
2125 addsta->station_flags = cmd->station_flags;
2126 addsta->station_flags_msk = cmd->station_flags_msk;
2127 addsta->tid_disable_tx = cmd->tid_disable_tx;
2128 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
2129 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
2130 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
2131 addsta->reserved1 = __constant_cpu_to_le16(0);
2132 addsta->reserved2 = __constant_cpu_to_le32(0);
2133
2134 return (u16)sizeof(struct iwl4965_addsta_cmd);
2135}
Tomas Winklerf20217d2008-05-29 16:35:10 +08002136
Tomas Winklerf20217d2008-05-29 16:35:10 +08002137static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
2138{
Tomas Winkler25a65722008-06-12 09:47:07 +08002139 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002140}
2141
/**
 * iwl4965_tx_status_reply_tx - Handle Tx rspnse for frames in aggregation queue
 * @priv: driver private data
 * @agg: per-station/TID aggregation state to update
 * @tx_resp: uCode Tx response with per-frame agg_status entries
 * @txq_id: hardware queue the response arrived on
 * @start_idx: queue index of the first frame in this attempt
 *
 * Records the attempt in @agg: for a single-frame attempt the mac80211
 * status is filled immediately (no block-ack expected); for multi-frame
 * attempts a bitmap of pending frames is built and a block-ack is
 * awaited.  Returns 0, or -1 when a frame's queue index disagrees with
 * its sequence-control field.
 */
static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
				      struct iwl_ht_agg *agg,
				      struct iwl4965_tx_resp *tx_resp,
				      int txq_id, u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;
	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* # frames attempted by Tx command */
	if (agg->frame_count == 1) {
		/* Only one frame was attempted; no block-ack will arrive */
		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		/* FIXME: code repetition */
		IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
				   agg->frame_count, agg->start_idx, idx);

		/* report status for this single frame to mac80211 now */
		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
		info->status.retry_count = tx_resp->failure_frame;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= iwl_is_tx_success(status)?
			IEEE80211_TX_STAT_ACK : 0;
		iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
		/* FIXME: code repetition end */

		IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
				    status & 0xff, tx_resp->failure_frame);
		IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;
		int start = agg->start_idx;

		/* Construct bit-map of pending frames within Tx window */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq  = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_INDEX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			/* frames the uCode never put on air don't occupy
			 * a block-ack window slot */
			if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
				      AGG_TX_STATE_ABORT_MSK))
				continue;

			IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
					   agg->frame_count, txq_id, idx);

			hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);

			/* sanity: queue index must match the frame's SN */
			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IWL_ERROR("BUG_ON idx doesn't match seq control"
					  " idx=%d, seq_idx=%d, seq=%d\n",
					  idx, SEQ_TO_SN(sc),
					  hdr->seq_ctrl);
				return -1;
			}

			IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
					   i, idx, SEQ_TO_SN(sc));

			/* NOTE(review): this re-anchors the 64-bit bitmap
			 * when idx falls outside the current window,
			 * apparently handling 0xff queue wrap-around —
			 * confirm the >64 / <-64 bounds before changing. */
			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh  = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= (1 << sh);
			IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
					   start, (u32)(bitmap & 0xFFFFFFFF));
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
				   agg->frame_count, agg->start_idx,
				   (unsigned long long)agg->bitmap);

		/* any pending frame means a block-ack is still expected */
		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
Tomas Winklerf20217d2008-05-29 16:35:10 +08002252
/**
 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
 * @priv: driver private data
 * @rxb: Rx buffer holding the REPLY_TX notification
 *
 * Fills in mac80211 Tx status for the completed frame(s), reclaims the
 * corresponding queue entries, and wakes the queue when space frees up.
 * Queues with scheduler-retry (aggregation) enabled are routed through
 * iwl4965_tx_status_reply_tx and reclaimed up to the scheduler SSN.
 */
static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->u.status);
	int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
	__le16 fc;
	struct ieee80211_hdr *hdr;
	u8 *qc = NULL;

	/* reject responses pointing outside the queue's live window */
	if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
		IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
			  "is out of range [0-%d] %d %d\n", txq_id,
			  index, txq->q.n_bd, txq->q.write_ptr,
			  txq->q.read_ptr);
		return;
	}

	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
	memset(&info->status, 0, sizeof(info->status));

	/* QoS data frames carry the TID in their QoS control field */
	hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
	fc = hdr->frame_control;
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	}

	sta_id = iwl_get_ra_sta_id(priv, hdr);
	if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
		IWL_ERROR("Station not known\n");
		return;
	}

	if (txq->sched_retry) {
		/* aggregation queue: account the attempt, then reclaim
		 * everything the scheduler has moved past (up to scd_ssn) */
		const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
		struct iwl_ht_agg *agg = NULL;

		/* sched_retry implies a QoS frame; bail out otherwise */
		if (!qc)
			return;

		agg = &priv->stations[sta_id].tid[tid].agg;

		iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);

		/* check if BAR is needed */
		if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			int freed, ampdu_q;
			index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
					   "%d index %d\n", scd_ssn , index);
			freed = iwl_tx_queue_reclaim(priv, txq_id, index);
			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

			if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
			    txq_id >= 0 && priv->mac80211_registered &&
			    agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) {
				/* calculate mac80211 ampdu sw queue to wake */
				ampdu_q = txq_id - IWL49_FIRST_AMPDU_QUEUE +
					  priv->hw->queues;
				if (agg->state == IWL_AGG_OFF)
					ieee80211_wake_queue(priv->hw, txq_id);
				else
					ieee80211_wake_queue(priv->hw, ampdu_q);
			}
			iwl_txq_check_empty(priv, sta_id, tid, txq_id);
		}
	} else {
		/* plain queue: report this frame's status directly */
		info->status.retry_count = tx_resp->failure_frame;
		info->flags |=
			iwl_is_tx_success(status) ? IEEE80211_TX_STAT_ACK : 0;
		iwl_hwrate_to_tx_control(priv,
					le32_to_cpu(tx_resp->rate_n_flags),
					info);

		IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags "
			     "0x%x retries %d\n", txq_id,
			     iwl_get_tx_fail_reason(status),
			     status, le32_to_cpu(tx_resp->rate_n_flags),
			     tx_resp->failure_frame);

		IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);

		if (index != -1) {
			int freed = iwl_tx_queue_reclaim(priv, txq_id, index);
			if (tid != MAX_TID_COUNT)
				priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
			if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
			    (txq_id >= 0) && priv->mac80211_registered)
				ieee80211_wake_queue(priv->hw, txq_id);
			if (tid != MAX_TID_COUNT)
				iwl_txq_check_empty(priv, sta_id, tid, txq_id);
		}
	}

	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
		IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
}
2363
2364
Zhu Yib481de92007-09-25 17:54:57 -07002365/* Set up 4965-specific Rx frame reply handlers */
Emmanuel Grumbachd4789ef2008-04-24 11:55:20 -07002366static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002367{
2368 /* Legacy Rx frames */
Emmanuel Grumbach1781a072008-06-30 17:23:09 +08002369 priv->rx_handlers[REPLY_RX] = iwl_rx_reply_rx;
Ron Rindjunsky37a44212008-05-29 16:35:18 +08002370 /* Tx response */
Tomas Winklerf20217d2008-05-29 16:35:10 +08002371 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
Zhu Yib481de92007-09-25 17:54:57 -07002372}
2373
/*
 * iwl4965_setup_deferred_work - initialize 4965-specific deferred work
 *
 * Hooks the Tx-power update handler onto a work item so it can run in
 * process context later.  Paired with iwl4965_cancel_deferred_work(),
 * which must be called before teardown to stop this work.
 */
static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
{
	INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
}
2378
/*
 * iwl4965_cancel_deferred_work - cancel 4965-specific deferred work
 *
 * Cancels the Tx-power work item set up in iwl4965_setup_deferred_work()
 * and waits for any in-flight execution to finish before returning.
 */
static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->txpower_work);
}
2383
Tomas Winkler3c424c22008-04-15 16:01:42 -07002384
/* Host-command ops specific to the 4965: only the RXON association
 * command differs from the iwlcore defaults. */
static struct iwl_hcmd_ops iwl4965_hcmd = {
	.rxon_assoc = iwl4965_send_rxon_assoc,
};
2388
/* Host-command helper ops for the 4965: command sizing, station-add
 * command construction, sensitivity/gain calibration callbacks, and
 * RTS flag selection for Tx commands. */
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
	.get_hcmd_size = iwl4965_get_hcmd_size,
	.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
	.chain_noise_reset = iwl4965_chain_noise_reset,
	.gain_computation = iwl4965_gain_computation,
	.rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag,
};
2396
/* Library ops for the 4965: the per-device hooks the shared iwlcore
 * layer calls for hardware setup, Tx queue/aggregation management,
 * firmware load, power, and EEPROM access. */
static struct iwl_lib_ops iwl4965_lib = {
	/* Hardware parameters and shared (keep-warm) memory */
	.set_hw_params = iwl4965_hw_set_hw_params,
	.alloc_shared_mem = iwl4965_alloc_shared_mem,
	.free_shared_mem = iwl4965_free_shared_mem,
	.shared_mem_rx_idx = iwl4965_shared_mem_rx_idx,
	/* Tx queue scheduler and 11n aggregation queue control */
	.txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
	.txq_set_sched = iwl4965_txq_set_sched,
	.txq_agg_enable = iwl4965_txq_agg_enable,
	.txq_agg_disable = iwl4965_txq_agg_disable,
	/* Rx dispatch table and deferred-work lifecycle */
	.rx_handler_setup = iwl4965_rx_handler_setup,
	.setup_deferred_work = iwl4965_setup_deferred_work,
	.cancel_deferred_work = iwl4965_cancel_deferred_work,
	/* Firmware bring-up */
	.is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
	.alive_notify = iwl4965_alive_notify,
	.init_alive_start = iwl4965_init_alive_start,
	.load_ucode = iwl4965_load_bsm,
	/* Adaptive power management (APM) hooks */
	.apm_ops = {
		.init = iwl4965_apm_init,
		.reset = iwl4965_apm_reset,
		.stop = iwl4965_apm_stop,
		.config = iwl4965_nic_config,
		.set_pwr_src = iwl4965_set_pwr_src,
	},
	/* EEPROM access and regulatory channel-band layout */
	.eeprom_ops = {
		.regulatory_bands = {
			EEPROM_REGULATORY_BAND_1_CHANNELS,
			EEPROM_REGULATORY_BAND_2_CHANNELS,
			EEPROM_REGULATORY_BAND_3_CHANNELS,
			EEPROM_REGULATORY_BAND_4_CHANNELS,
			EEPROM_REGULATORY_BAND_5_CHANNELS,
			EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS,
			EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS
		},
		.verify_signature  = iwlcore_eeprom_verify_signature,
		.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
		.release_semaphore = iwlcore_eeprom_release_semaphore,
		.check_version = iwl4965_eeprom_check_version,
		.query_addr = iwlcore_eeprom_query_addr,
	},
	/* Power, Tx power, Rx chain, and temperature calibration */
	.set_power = iwl4965_set_power,
	.send_tx_power = iwl4965_send_tx_power,
	.update_chain_flags = iwl4965_update_chain_flags,
	.temperature = iwl4965_temperature_calib,
};
2441
/* Aggregate ops bundle for the 4965, referenced by iwl4965_agn_cfg. */
static struct iwl_ops iwl4965_ops = {
	.lib = &iwl4965_lib,
	.hcmd = &iwl4965_hcmd,
	.utils = &iwl4965_hcmd_utils,
};
2447
/* Device configuration for the 4965AGN; exported (non-static) so the
 * PCI probe table elsewhere in the driver can reference it. */
struct iwl_cfg iwl4965_agn_cfg = {
	.name = "4965AGN",
	/* Firmware image name; IWL4965_UCODE_API encodes the API revision */
	.fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode",
	/* Supports 802.11a, 802.11g, and 802.11n */
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.eeprom_size = IWL4965_EEPROM_IMG_SIZE,
	.ops = &iwl4965_ops,
	.mod_params = &iwl4965_mod_params,
};
2456
Assaf Krauss1ea87392008-03-18 14:57:50 -07002457module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
2458MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
2459module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
2460MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
Emmanuel Grumbachfcc76c62008-04-15 16:01:47 -07002461module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444);
2462MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])\n");
Assaf Krauss1ea87392008-03-18 14:57:50 -07002463module_param_named(debug, iwl4965_mod_params.debug, int, 0444);
2464MODULE_PARM_DESC(debug, "debug output mask");
2465module_param_named(
2466 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444);
2467MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
2468
2469module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444);
2470MODULE_PARM_DESC(queues_num, "number of hw queues.");
Assaf Krauss1ea87392008-03-18 14:57:50 -07002471/* QoS */
2472module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444);
2473MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
Ron Rindjunsky49779292008-06-30 17:23:21 +08002474/* 11n */
2475module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, 0444);
2476MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
Assaf Krauss1ea87392008-03-18 14:57:50 -07002477module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
2478MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
Ron Rindjunsky49779292008-06-30 17:23:21 +08002479
Ester Kummer3a1081e2008-05-06 11:05:14 +08002480module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, 0444);
2481MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");