/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-calib.h"
#include "iwl-sta.h"
#include "iwl-agn-led.h"
#include "iwl-agn.h"
#include "iwl-agn-debugfs.h"
#include "iwl-legacy.h"

static int iwl4965_send_tx_power(struct iwl_priv *priv);
static int iwl4965_hw_get_temperature(struct iwl_priv *priv);

/* Highest firmware API version supported */
#define IWL4965_UCODE_API_MAX 2

/* Lowest firmware API version supported */
#define IWL4965_UCODE_API_MIN 2

#define IWL4965_FW_PRE "iwlwifi-4965-"
#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)

/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO(priv, "Begin verify bsm\n");

	/* verify BSM SRAM contents */
	val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = iwl_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "BSM uCode verification failed at "
				"addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				BSM_SRAM_LOWER_BOUND,
				reg - BSM_SRAM_LOWER_BOUND, len,
				val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");

	return 0;
}

/**
 * iwl4965_load_bsm - Load bootstrap instructions
 *
 * BSM operation:
 *
 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
 * in special SRAM that does not power down during RFKILL. When powering back
 * up after power-saving sleeps (or during initial uCode load), the BSM loads
 * the bootstrap program into the on-board processor, and starts it.
 *
 * The bootstrap program loads (via DMA) instructions and data for a new
 * program from host DRAM locations indicated by the host driver in the
 * BSM_DRAM_* registers. Once the new program is loaded, it starts
 * automatically.
 *
 * When initializing the NIC, the host driver points the BSM to the
 * "initialize" uCode image. This uCode sets up some internal data, then
 * notifies host via "initialize alive" that it is complete.
 *
 * The host then replaces the BSM_DRAM_* pointer values to point to the
 * normal runtime uCode instructions and a backup uCode data cache buffer
 * (filled initially with starting data values for the on-board processor),
 * then triggers the "initialize" uCode to load and launch the runtime uCode,
 * which begins normal operation.
 *
 * When doing a power-save shutdown, runtime uCode saves data SRAM into
 * the backup data cache in DRAM before SRAM is powered down.
 *
 * When powering back up, the BSM loads the bootstrap program. This reloads
 * the runtime uCode instructions and the backup data cache into SRAM,
 * and re-launches the runtime uCode from where it left off.
 */
static int iwl4965_load_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int i;
	u32 done;
	u32 reg_offset;
	int ret;

	IWL_DEBUG_INFO(priv, "Begin load bsm\n");

	priv->ucode_type = UCODE_RT;

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IWL49_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 *   in host DRAM ... host DRAM physical address bits 35:4 for 4965.
	 * NOTE:  iwl_init_alive_start() will replace these values,
	 *        after the "initialize" uCode has run, to point to
	 *        runtime/protocol instructions and backup data cache.
	 */
	pinst = priv->ucode_init.p_addr >> 4;
	pdata = priv->ucode_init_data.p_addr >> 4;
	inst_len = priv->ucode_init.len;
	data_len = priv->ucode_init_data.len;

	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));

	ret = iwl4965_verify_bsm(priv);
	if (ret)
		return ret;

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
	iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 *   to prepare to load "initialize" uCode */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish */
	for (i = 0; i < 100; i++) {
		done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
	else {
		IWL_ERR(priv, "BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 *   (e.g. when powering back up after power-save shutdown) */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);


	return 0;
}

/**
 * iwl4965_set_ucode_ptrs - Set uCode address location
 *
 * Tell initialization uCode where to find runtime uCode.
 *
 * BSM registers initially contain pointers to initialization uCode.
 * We need to replace them to load runtime uCode inst and data,
 * and to save runtime data when powering down.
 */
static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
{
	dma_addr_t pinst;
	dma_addr_t pdata;
	int ret = 0;

	/* bits 35:4 for 4965 */
	pinst = priv->ucode_code.p_addr >> 4;
	pdata = priv->ucode_data_backup.p_addr >> 4;

	/* Tell bootstrap uCode where to find image to load */
	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
		       priv->ucode_data.len);

	/* Inst byte count must be last to set up, bit 31 signals uCode
	 *   that all new ptr/size info is in place */
	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
		       priv->ucode_code.len | BSM_DRAM_INST_LOAD);
	IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");

	return ret;
}

/**
 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
 *
 * Called after REPLY_ALIVE notification received from "initialize" uCode.
 *
 * The 4965 "initialize" ALIVE reply contains calibration data for:
 *   Voltage, temperature, and MIMO tx gain correction, now stored in priv
 *   (3945 does not contain this data).
 *
 * Tell "initialize" uCode to go ahead and load the runtime uCode.
*/
static void iwl4965_init_alive_start(struct iwl_priv *priv)
{
	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded.  */
	if (iwl_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	/* Calculate temperature */
	priv->temperature = iwl4965_hw_get_temperature(priv);

	/* Send pointers to protocol/runtime uCode image ... init code will
	 * load and launch runtime uCode, which will send us another "Alive"
	 * notification. */
	IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
	if (iwl4965_set_ucode_ptrs(priv)) {
		/* Runtime instruction load won't happen;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
		goto restart;
	}
	return;

restart:
	queue_work(priv->workqueue, &priv->restart);
}

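/*
 * Returns true when the RXON flags select a 40 MHz channel configuration,
 * i.e. the channel-mode field is either pure 40 MHz or mixed 20/40 (HT40).
 */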
static bool is_ht40_channel(__le32 rxon_flags)
{
	int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
				>> RXON_FLG_CHANNEL_MODE_POS;
	return ((chan_mod == CHANNEL_MODE_PURE_40) ||
		(chan_mod == CHANNEL_MODE_MIXED));
}

/*
 * EEPROM handlers
 */
static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv)
{
	return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask
 * must be called under priv->lock and mac access
 */
static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
}

static void iwl4965_nic_config(struct iwl_priv *priv)
{
	unsigned long flags;
	u16 radio_cfg;

	spin_lock_irqsave(&priv->lock, flags);

	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

	/* write radio config values to register */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			    EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	priv->calib_info = (struct iwl_eeprom_calib_info *)
		iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);

	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
 * Called after every association, but this runs only once!
 *  ... once chain noise is calibrated the first time, it's good forever.  */
static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
{
	struct iwl_chain_noise_data *data = &(priv->chain_noise_data);

	if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
	    iwl_is_any_associated(priv)) {
		struct iwl_calib_diff_gain_cmd cmd;

		/* clear data for chain noise calibration algorithm */
		data->chain_noise_a = 0;
		data->chain_noise_b = 0;
		data->chain_noise_c = 0;
		data->chain_signal_a = 0;
		data->chain_signal_b = 0;
		data->chain_signal_c = 0;
		data->beacon_count = 0;

		memset(&cmd, 0, sizeof(cmd));
		cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = 0;
		cmd.diff_gain_b = 0;
		cmd.diff_gain_c = 0;
		if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				     sizeof(cmd), &cmd))
			IWL_ERR(priv,
				"Could not send REPLY_PHY_CALIBRATION_CMD\n");
		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
		IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
	}
}

static void iwl4965_gain_computation(struct iwl_priv *priv,
				     u32 *average_noise,
				     u16 min_average_noise_antenna_i,
				     u32 min_average_noise,
				     u8 default_chain)
{
	int i, ret;
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;

	data->delta_gain_code[min_average_noise_antenna_i] = 0;

	for (i = default_chain; i < NUM_RX_CHAINS; i++) {
		s32 delta_g = 0;

		if (!(data->disconn_array[i]) &&
		    (data->delta_gain_code[i] ==
			     CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
			delta_g = average_noise[i] - min_average_noise;
			data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
			data->delta_gain_code[i] =
				min(data->delta_gain_code[i],
				    (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

			data->delta_gain_code[i] =
				(data->delta_gain_code[i] | (1 << 2));
		} else {
			data->delta_gain_code[i] = 0;
		}
	}
	IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
			data->delta_gain_code[0],
			data->delta_gain_code[1],
			data->delta_gain_code[2]);

	/* Differential gain gets sent to uCode only once */
	if (!data->radio_write) {
		struct iwl_calib_diff_gain_cmd cmd;
		data->radio_write = 1;

		memset(&cmd, 0, sizeof(cmd));
		cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = data->delta_gain_code[0];
		cmd.diff_gain_b = data->delta_gain_code[1];
		cmd.diff_gain_c = data->delta_gain_code[2];
		ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				       sizeof(cmd), &cmd);
		if (ret)
			IWL_DEBUG_CALIB(priv, "fail sending cmd "
					"REPLY_PHY_CALIBRATION_CMD\n");

		/* TODO we might want recalculate
		 * rx_chain in rxon cmd */

		/* Mark so we run this algo only once! */
		data->state = IWL_CHAIN_NOISE_CALIBRATED;
	}
}

static void iwl4965_bg_txpower_work(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
					     txpower_work);

	/* If a scan happened to start before we got here
	 * then just return; the statistics notification will
	 * kick off another scheduled work to compensate for
	 * any temperature delta we missed here. */
	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status))
		return;

	mutex_lock(&priv->mutex);

	/* Regardless of whether we are associated, we must reconfigure the
	 * TX power since frames can be sent on non-radar channels while
	 * not associated */
	iwl4965_send_tx_power(priv);

	/* Update last_temperature to keep is_calib_needed from running
	 * when it isn't needed... */
	priv->last_temperature = priv->temperature;

	mutex_unlock(&priv->mutex);
}

/*
 * Acquire priv->lock before calling this function !
 */
static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			   (index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
}

/**
 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
 * @scd_retry: (1) Indicates queue will be used in aggregation mode
 *
 * NOTE:  Acquire priv->lock before calling this function !
 */
static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Find out whether to activate Tx queue */
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate */
	iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
		       (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		       (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		       (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		       IWL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}

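/*
 * Default mapping from TX queue number (the array index below) to the
 * TX DMA/FIFO channel that feeds it.  Queues marked IWL_TX_FIFO_UNUSED
 * are still activated in iwl4965_alive_notify(), but are not bound to a
 * FIFO via iwl4965_tx_queue_set_status().
 */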
static const s8 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
	IWL49_CMD_FIFO_NUM,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
};

static int iwl4965_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_write_targ_mem(priv, a, 0);

	/* Tell 4965 where to find Tx byte count tables */
	iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write indexes */
		iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				   IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				   (SCD_WIN_SIZE <<
				   IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				   IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				   IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				   sizeof(u32),
				   (SCD_FRAME_LIMIT <<
				   IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				   IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
		       (1 << priv->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));

	iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		iwl_txq_ctx_activate(priv, i);

		if (ac == IWL_TX_FIFO_UNUSED)
			continue;

		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0, /* not used, set to 0 */

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};

static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
{
	/* want Kelvin */
	priv->hw_params.ct_kill_threshold =
		CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
}

/**
 * iwl4965_hw_set_hw_params
 *
 * Called when initializing driver
 */
static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
{
	if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
	    priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
		priv->cfg->base_params->num_of_queues =
			priv->cfg->mod_params->num_of_queues;

	priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
	priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	priv->hw_params.scd_bc_tbls_size =
			priv->cfg->base_params->num_of_queues *
			sizeof(struct iwl4965_scd_bc_tbl);
	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
	priv->hw_params.max_stations = IWL4965_STATION_COUNT;
	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
	priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
	priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
	priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);

	priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;

	priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
	priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
	priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
	priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;

	iwl4965_set_ct_threshold(priv);

	priv->hw_params.sens = &iwl4965_sensitivity;
	priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;

	return 0;
}

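/*
 * Signed integer division with rounding to the nearest integer: the
 * (num * 2 + denom) / (denom * 2) form rounds the magnitude of num/denom
 * half-up, and the sign is re-applied afterwards.  The quotient is
 * returned through *res; the function itself always returns 1.
 */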
static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
{
	s32 sign = 1;

	if (num < 0) {
		sign = -sign;
		num = -num;
	}
	if (denom < 0) {
		sign = -sign;
		denom = -denom;
	}
	*res = 1;
	*res = ((num * 2 + denom) / (denom * 2)) * sign;

	return 1;
}

/**
 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
 *
 * Determines power supply voltage compensation for txpower calculations.
 * Returns number of 1/2-dB steps to subtract from gain table index,
 * to compensate for difference between power supply voltage during
 * factory measurements, vs. current power supply voltage.
 *
 * Voltage indication is higher for lower voltage.
 * Lower voltage requires more gain (lower gain table index).
 */
static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
					    s32 current_voltage)
{
	s32 comp = 0;

	if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
	    (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
		return 0;

	iwl4965_math_div_round(current_voltage - eeprom_voltage,
			       TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);

	if (current_voltage > eeprom_voltage)
		comp *= 2;
	if ((comp < -2) || (comp > 2))
		comp = 0;

	return comp;
}

static s32 iwl4965_get_tx_atten_grp(u16 channel)
{
	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;

	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;

	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;

	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;

	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;

	return -1;
}

static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
{
	s32 b = -1;

	for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
		if (priv->calib_info->band_info[b].ch_from == 0)
			continue;

		if ((channel >= priv->calib_info->band_info[b].ch_from)
		    && (channel <= priv->calib_info->band_info[b].ch_to))
			break;
	}

	return b;
}

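/*
 * Linear interpolation (rounded via iwl4965_math_div_round): returns the
 * y value at x on the line through (x1, y1) and (x2, y2); if the two
 * sample points coincide (x1 == x2), y1 is returned.
 */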
static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
{
	s32 val;

	if (x2 == x1)
		return y1;
	else {
		iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
		return val + y2;
	}
}

/**
 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
 *
 * Interpolates factory measurements from the two sample channels within a
 * sub-band, to apply to channel of interest.  Interpolation is proportional to
 * differences in channel frequencies, which is proportional to differences
 * in channel number.
 */
static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
				    struct iwl_eeprom_calib_ch_info *chan_info)
{
	s32 s = -1;
	u32 c;
	u32 m;
	const struct iwl_eeprom_calib_measure *m1;
	const struct iwl_eeprom_calib_measure *m2;
	struct iwl_eeprom_calib_measure *omeas;
	u32 ch_i1;
	u32 ch_i2;

	s = iwl4965_get_sub_band(priv, channel);
	if (s >= EEPROM_TX_POWER_BANDS) {
		IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
		return -1;
	}

	ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
	ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
	chan_info->ch_num = (u8) channel;

	IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
			  channel, s, ch_i1, ch_i2);

	for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
		for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
			m1 = &(priv->calib_info->band_info[s].ch1.
			       measurements[c][m]);
			m2 = &(priv->calib_info->band_info[s].ch2.
			       measurements[c][m]);
			omeas = &(chan_info->measurements[c][m]);

			omeas->actual_pow =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->actual_pow,
							   ch_i2,
							   m2->actual_pow);
			omeas->gain_idx =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->gain_idx, ch_i2,
							   m2->gain_idx);
			omeas->temperature =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->temperature,
							   ch_i2,
							   m2->temperature);
			omeas->pa_det =
			    (s8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->pa_det, ch_i2,
							   m2->pa_det);

			IWL_DEBUG_TXPOWER(priv,
				"chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
				m1->actual_pow, m2->actual_pow, omeas->actual_pow);
			IWL_DEBUG_TXPOWER(priv,
				"chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
				m1->gain_idx, m2->gain_idx, omeas->gain_idx);
			IWL_DEBUG_TXPOWER(priv,
				"chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
				m1->pa_det, m2->pa_det, omeas->pa_det);
			IWL_DEBUG_TXPOWER(priv,
				"chain %d meas %d  T1=%d T2=%d T=%d\n", c, m,
				m1->temperature, m2->temperature,
				omeas->temperature);
		}
	}

	return 0;
}

/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
static s32 back_off_table[] = {
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
	10			/* CCK */
};

/* Thermal compensation values for txpower for various frequency ranges ...
 *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
static struct iwl4965_txpower_comp_entry {
	s32 degrees_per_05db_a;
	s32 degrees_per_05db_a_denom;
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
	{9, 2},			/* group 0 5.2, ch  34-43 */
	{4, 1},			/* group 1 5.2, ch  44-70 */
	{4, 1},			/* group 2 5.2, ch  71-124 */
	{4, 1},			/* group 3 5.2, ch 125-200 */
	{3, 1}			/* group 4 2.4, ch   all */
};

static s32 get_min_power_index(s32 rate_power_index, u32 band)
{
	if (!band) {
		if ((rate_power_index & 7) <= 4)
			return MIN_TX_GAIN_INDEX_52GHZ_EXT;
	}
	return MIN_TX_GAIN_INDEX;
}

struct gain_entry {
	u8 dsp;
	u8 radio;
};

static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain index table */
	{
	 {123, 0x3F},		/* highest txpower */
	 {117, 0x3F},
	 {110, 0x3F},
	 {104, 0x3F},
	 {98, 0x3F},
	 {110, 0x3E},
	 {104, 0x3E},
	 {98, 0x3E},
	 {110, 0x3D},
	 {104, 0x3D},
	 {98, 0x3D},
	 {110, 0x3C},
	 {104, 0x3C},
	 {98, 0x3C},
	 {110, 0x3B},
	 {104, 0x3B},
	 {98, 0x3B},
	 {110, 0x3A},
	 {104, 0x3A},
	 {98, 0x3A},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x25},
	 {104, 0x25},
	 {98, 0x25},
	 {110, 0x24},
	 {104, 0x24},
	 {98, 0x24},
	 {110, 0x23},
	 {104, 0x23},
	 {98, 0x23},
	 {110, 0x22},
	 {104, 0x18},
	 {98, 0x18},
	 {110, 0x17},
	 {104, 0x17},
	 {98, 0x17},
	 {110, 0x16},
	 {104, 0x16},
	 {98, 0x16},
	 {110, 0x15},
	 {104, 0x15},
	 {98, 0x15},
	 {110, 0x14},
	 {104, 0x14},
	 {98, 0x14},
	 {110, 0x13},
	 {104, 0x13},
	 {98, 0x13},
	 {110, 0x12},
	 {104, 0x08},
	 {98, 0x08},
	 {110, 0x07},
	 {104, 0x07},
	 {98, 0x07},
	 {110, 0x06},
	 {104, 0x06},
	 {98, 0x06},
	 {110, 0x05},
	 {104, 0x05},
	 {98, 0x05},
	 {110, 0x04},
	 {104, 0x04},
	 {98, 0x04},
	 {110, 0x03},
	 {104, 0x03},
	 {98, 0x03},
	 {110, 0x02},
	 {104, 0x02},
	 {98, 0x02},
	 {110, 0x01},
	 {104, 0x01},
	 {98, 0x01},
	 {110, 0x00},
	 {104, 0x00},
	 {98, 0x00},
	 {93, 0x00},
	 {88, 0x00},
	 {83, 0x00},
	 {78, 0x00},
	 },
	/* 2.4GHz power gain index table */
	{
	 {110, 0x3f},		/* highest txpower */
	 {104, 0x3f},
	 {98, 0x3f},
	 {110, 0x3e},
	 {104, 0x3e},
	 {98, 0x3e},
	 {110, 0x3d},
	 {104, 0x3d},
	 {98, 0x3d},
	 {110, 0x3c},
	 {104, 0x3c},
	 {98, 0x3c},
	 {110, 0x3b},
	 {104, 0x3b},
	 {98, 0x3b},
	 {110, 0x3a},
	 {104, 0x3a},
	 {98, 0x3a},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x6},
	 {104, 0x6},
	 {98, 0x6},
	 {110, 0x5},
	 {104, 0x5},
	 {98, 0x5},
	 {110, 0x4},
	 {104, 0x4},
	 {98, 0x4},
	 {110, 0x3},
	 {104, 0x3},
	 {98, 0x3},
	 {110, 0x2},
	 {104, 0x2},
	 {98, 0x2},
	 {110, 0x1},
	 {104, 0x1},
	 {98, 0x1},
	 {110, 0x0},
	 {104, 0x0},
	 {98, 0x0},
	 {97, 0},
	 {96, 0},
	 {95, 0},
	 {94, 0},
	 {93, 0},
	 {92, 0},
	 {91, 0},
	 {90, 0},
	 {89, 0},
	 {88, 0},
	 {87, 0},
	 {86, 0},
	 {85, 0},
	 {84, 0},
	 {83, 0},
	 {82, 0},
	 {81, 0},
	 {80, 0},
	 {79, 0},
	 {78, 0},
	 {77, 0},
	 {76, 0},
	 {75, 0},
	 {74, 0},
	 {73, 0},
	 {72, 0},
	 {71, 0},
	 {70, 0},
	 {69, 0},
	 {68, 0},
	 {67, 0},
	 {66, 0},
	 {65, 0},
	 {64, 0},
	 {63, 0},
	 {62, 0},
	 {61, 0},
	 {60, 0},
	 {59, 0},
	 }
};

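/*
 * The tables above are consumed by iwl4965_fill_txpower_tbl() below:
 * gain_table[band][power_index] is indexed by band (0 selects the 5.2 GHz
 * table, 1 the 2.4 GHz table, matching the "band" argument) and by a power
 * index clamped to 0..107, where a higher index means lower txpower.
 */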
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001117static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001118 u8 is_ht40, u8 ctrl_chan_high,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001119 struct iwl4965_tx_power_db *tx_power_tbl)
Zhu Yib481de92007-09-25 17:54:57 -07001120{
1121 u8 saturation_power;
1122 s32 target_power;
1123 s32 user_target_power;
1124 s32 power_limit;
1125 s32 current_temp;
1126 s32 reg_limit;
1127 s32 current_regulatory;
1128 s32 txatten_grp = CALIB_CH_GROUP_MAX;
1129 int i;
1130 int c;
Assaf Kraussbf85ea42008-03-14 10:38:49 -07001131 const struct iwl_channel_info *ch_info = NULL;
Tomas Winkler073d3f52008-04-21 15:41:52 -07001132 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
1133 const struct iwl_eeprom_calib_measure *measurement;
Zhu Yib481de92007-09-25 17:54:57 -07001134 s16 voltage;
1135 s32 init_voltage;
1136 s32 voltage_compensation;
1137 s32 degrees_per_05db_num;
1138 s32 degrees_per_05db_denom;
1139 s32 factory_temp;
1140 s32 temperature_comp[2];
1141 s32 factory_gain_index[2];
1142 s32 factory_actual_pwr[2];
1143 s32 power_index;
1144
Winkler, Tomas62ea9c52009-01-19 15:30:29 -08001145 /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
Zhu Yib481de92007-09-25 17:54:57 -07001146 * are used for indexing into txpower table) */
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001147 user_target_power = 2 * priv->tx_power_user_lmt;
Zhu Yib481de92007-09-25 17:54:57 -07001148
1149 /* Get current (RXON) channel, band, width */
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001150 IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
1151 is_ht40);
Zhu Yib481de92007-09-25 17:54:57 -07001152
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001153 ch_info = iwl_get_channel_info(priv, priv->band, channel);
1154
1155 if (!is_channel_valid(ch_info))
Zhu Yib481de92007-09-25 17:54:57 -07001156 return -EINVAL;
1157
1158 /* get txatten group, used to select 1) thermal txpower adjustment
1159 * and 2) mimo txpower balance between Tx chains. */
1160 txatten_grp = iwl4965_get_tx_atten_grp(channel);
Samuel Ortiza3139c52008-12-19 10:37:09 +08001161 if (txatten_grp < 0) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08001162 IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
Samuel Ortiza3139c52008-12-19 10:37:09 +08001163 channel);
Zhu Yib481de92007-09-25 17:54:57 -07001164 return -EINVAL;
Samuel Ortiza3139c52008-12-19 10:37:09 +08001165 }
Zhu Yib481de92007-09-25 17:54:57 -07001166
Tomas Winklere1623442009-01-27 14:27:56 -08001167 IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
Zhu Yib481de92007-09-25 17:54:57 -07001168 channel, txatten_grp);
1169
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001170 if (is_ht40) {
Zhu Yib481de92007-09-25 17:54:57 -07001171 if (ctrl_chan_high)
1172 channel -= 2;
1173 else
1174 channel += 2;
1175 }
1176
1177 /* hardware txpower limits ...
1178 * saturation (clipping distortion) txpowers are in half-dBm */
1179 if (band)
Tomas Winkler073d3f52008-04-21 15:41:52 -07001180 saturation_power = priv->calib_info->saturation_power24;
Zhu Yib481de92007-09-25 17:54:57 -07001181 else
Tomas Winkler073d3f52008-04-21 15:41:52 -07001182 saturation_power = priv->calib_info->saturation_power52;
Zhu Yib481de92007-09-25 17:54:57 -07001183
1184 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
1185 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
1186 if (band)
1187 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
1188 else
1189 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
1190 }
1191
1192 /* regulatory txpower limits ... reg_limit values are in half-dBm,
1193 * max_power_avg values are in dBm, convert * 2 */
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001194 if (is_ht40)
1195 reg_limit = ch_info->ht40_max_power_avg * 2;
Zhu Yib481de92007-09-25 17:54:57 -07001196 else
1197 reg_limit = ch_info->max_power_avg * 2;
1198
1199 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
1200 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
1201 if (band)
1202 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
1203 else
1204 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
1205 }
1206
1207 /* Interpolate txpower calibration values for this channel,
1208 * based on factory calibration tests on spaced channels. */
1209 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
1210
1211 /* calculate tx gain adjustment based on power supply voltage */
Johannes Bergb7bb1752009-12-14 14:12:09 -08001212 voltage = le16_to_cpu(priv->calib_info->voltage);
Zhu Yib481de92007-09-25 17:54:57 -07001213 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
1214 voltage_compensation =
1215 iwl4965_get_voltage_compensation(voltage, init_voltage);
1216
Tomas Winklere1623442009-01-27 14:27:56 -08001217 IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
Zhu Yib481de92007-09-25 17:54:57 -07001218 init_voltage,
1219 voltage, voltage_compensation);
1220
1221 /* get current temperature (Celsius) */
1222 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
1223 current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
1224 current_temp = KELVIN_TO_CELSIUS(current_temp);
1225
1226 /* select thermal txpower adjustment params, based on channel group
1227 * (same frequency group used for mimo txatten adjustment) */
1228 degrees_per_05db_num =
1229 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
1230 degrees_per_05db_denom =
1231 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
1232
1233 /* get per-chain txpower values from factory measurements */
1234 for (c = 0; c < 2; c++) {
1235 measurement = &ch_eeprom_info.measurements[c][1];
1236
1237 /* txgain adjustment (in half-dB steps) based on difference
1238 * between factory and current temperature */
1239 factory_temp = measurement->temperature;
1240 iwl4965_math_div_round((current_temp - factory_temp) *
1241 degrees_per_05db_denom,
1242 degrees_per_05db_num,
1243 &temperature_comp[c]);
1244
1245 factory_gain_index[c] = measurement->gain_idx;
1246 factory_actual_pwr[c] = measurement->actual_pow;
1247
Tomas Winklere1623442009-01-27 14:27:56 -08001248 IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
1249 IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
Zhu Yib481de92007-09-25 17:54:57 -07001250 "curr tmp %d, comp %d steps\n",
1251 factory_temp, current_temp,
1252 temperature_comp[c]);
1253
Tomas Winklere1623442009-01-27 14:27:56 -08001254 IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
Zhu Yib481de92007-09-25 17:54:57 -07001255 factory_gain_index[c],
1256 factory_actual_pwr[c]);
1257 }
1258
1259 /* for each of 33 bit-rates (including 1 for CCK) */
1260 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
1261 u8 is_mimo_rate;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001262 union iwl4965_tx_power_dual_stream tx_power;
Zhu Yib481de92007-09-25 17:54:57 -07001263
1264 /* for mimo, reduce each chain's txpower by half
1265 * (3dB, 6 steps), so total output power is regulatory
1266 * compliant. */
1267 if (i & 0x8) {
1268 current_regulatory = reg_limit -
1269 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1270 is_mimo_rate = 1;
1271 } else {
1272 current_regulatory = reg_limit;
1273 is_mimo_rate = 0;
1274 }
1275
1276 /* find txpower limit, either hardware or regulatory */
1277 power_limit = saturation_power - back_off_table[i];
1278 if (power_limit > current_regulatory)
1279 power_limit = current_regulatory;
1280
1281 /* reduce user's txpower request if necessary
1282 * for this rate on this channel */
1283 target_power = user_target_power;
1284 if (target_power > power_limit)
1285 target_power = power_limit;
1286
Tomas Winklere1623442009-01-27 14:27:56 -08001287 IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
Zhu Yib481de92007-09-25 17:54:57 -07001288 i, saturation_power - back_off_table[i],
1289 current_regulatory, user_target_power,
1290 target_power);
1291
1292 /* for each of 2 Tx chains (radio transmitters) */
1293 for (c = 0; c < 2; c++) {
1294 s32 atten_value;
1295
1296 if (is_mimo_rate)
1297 atten_value =
1298 (s32)le32_to_cpu(priv->card_alive_init.
1299 tx_atten[txatten_grp][c]);
1300 else
1301 atten_value = 0;
1302
1303 /* calculate index; higher index means lower txpower */
1304 power_index = (u8) (factory_gain_index[c] -
1305 (target_power -
1306 factory_actual_pwr[c]) -
1307 temperature_comp[c] -
1308 voltage_compensation +
1309 atten_value);
1310
Tomas Winklere1623442009-01-27 14:27:56 -08001311/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
Zhu Yib481de92007-09-25 17:54:57 -07001312 power_index); */
1313
1314 if (power_index < get_min_power_index(i, band))
1315 power_index = get_min_power_index(i, band);
1316
1317 /* adjust 5 GHz index to support negative indexes */
1318 if (!band)
1319 power_index += 9;
1320
1321 /* CCK, rate 32, reduce txpower for CCK */
1322 if (i == POWER_TABLE_CCK_ENTRY)
1323 power_index +=
1324 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
1325
1326 /* stay within the table! */
1327 if (power_index > 107) {
Winkler, Tomas39aadf82008-12-19 10:37:32 +08001328 IWL_WARN(priv, "txpower index %d > 107\n",
Zhu Yib481de92007-09-25 17:54:57 -07001329 power_index);
1330 power_index = 107;
1331 }
1332 if (power_index < 0) {
Winkler, Tomas39aadf82008-12-19 10:37:32 +08001333 IWL_WARN(priv, "txpower index %d < 0\n",
Zhu Yib481de92007-09-25 17:54:57 -07001334 power_index);
1335 power_index = 0;
1336 }
1337
1338 /* fill txpower command for this rate/chain */
1339 tx_power.s.radio_tx_gain[c] =
1340 gain_table[band][power_index].radio;
1341 tx_power.s.dsp_predis_atten[c] =
1342 gain_table[band][power_index].dsp;
1343
Tomas Winklere1623442009-01-27 14:27:56 -08001344 IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
Zhu Yib481de92007-09-25 17:54:57 -07001345 "gain 0x%02x dsp %d\n",
1346 c, atten_value, power_index,
1347 tx_power.s.radio_tx_gain[c],
1348 tx_power.s.dsp_predis_atten[c]);
Tomas Winkler3ac7f142008-07-21 02:40:14 +03001349 } /* for each chain */
Zhu Yib481de92007-09-25 17:54:57 -07001350
1351 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1352
Tomas Winkler3ac7f142008-07-21 02:40:14 +03001353 } /* for each rate */
Zhu Yib481de92007-09-25 17:54:57 -07001354
1355 return 0;
1356}
1357
1358/**
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001359 * iwl4965_send_tx_power - Configure the TXPOWER level user limit
Zhu Yib481de92007-09-25 17:54:57 -07001360 *
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001361 * Uses the active RXON for channel, band, and characteristics (ht40, high)
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001362 * The power limit is taken from priv->tx_power_user_lmt.
Zhu Yib481de92007-09-25 17:54:57 -07001363 */
Tomas Winkler630fe9b2008-06-12 09:47:08 +08001364static int iwl4965_send_tx_power(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001365{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001366 struct iwl4965_txpowertable_cmd cmd = { 0 };
Tomas Winkler857485c2008-03-21 13:53:44 -07001367 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001368 u8 band = 0;
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001369 bool is_ht40 = false;
Zhu Yib481de92007-09-25 17:54:57 -07001370 u8 ctrl_chan_high = 0;
Johannes Berg246ed352010-08-23 10:46:32 +02001371 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
Zhu Yib481de92007-09-25 17:54:57 -07001372
Stanislaw Gruszka4beeba72010-10-25 10:34:50 +02001373 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
1374 "TX Power requested while scanning!\n"))
Zhu Yib481de92007-09-25 17:54:57 -07001375 return -EAGAIN;
Zhu Yib481de92007-09-25 17:54:57 -07001376
Johannes Berg8318d782008-01-24 19:38:38 +01001377 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07001378
Johannes Berg246ed352010-08-23 10:46:32 +02001379 is_ht40 = is_ht40_channel(ctx->active.flags);
Zhu Yib481de92007-09-25 17:54:57 -07001380
Johannes Berg246ed352010-08-23 10:46:32 +02001381 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
Zhu Yib481de92007-09-25 17:54:57 -07001382 ctrl_chan_high = 1;
1383
1384 cmd.band = band;
Johannes Berg246ed352010-08-23 10:46:32 +02001385 cmd.channel = ctx->active.channel;
Zhu Yib481de92007-09-25 17:54:57 -07001386
Tomas Winkler857485c2008-03-21 13:53:44 -07001387 ret = iwl4965_fill_txpower_tbl(priv, band,
Johannes Berg246ed352010-08-23 10:46:32 +02001388 le16_to_cpu(ctx->active.channel),
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001389 is_ht40, ctrl_chan_high, &cmd.tx_power);
Tomas Winkler857485c2008-03-21 13:53:44 -07001390 if (ret)
1391 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07001392
Tomas Winkler857485c2008-03-21 13:53:44 -07001393 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1394
1395out:
1396 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001397}
1398
Johannes Berg246ed352010-08-23 10:46:32 +02001399static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1400 struct iwl_rxon_context *ctx)
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001401{
1402 int ret = 0;
1403 struct iwl4965_rxon_assoc_cmd rxon_assoc;
Johannes Berg246ed352010-08-23 10:46:32 +02001404 const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
1405 const struct iwl_rxon_cmd *rxon2 = &ctx->active;
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001406
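	/* Compare only the fields carried by REPLY_RXON_ASSOC; if none of
	 * them changed, there is nothing to resend. */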
1407 if ((rxon1->flags == rxon2->flags) &&
1408 (rxon1->filter_flags == rxon2->filter_flags) &&
1409 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1410 (rxon1->ofdm_ht_single_stream_basic_rates ==
1411 rxon2->ofdm_ht_single_stream_basic_rates) &&
1412 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1413 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1414 (rxon1->rx_chain == rxon2->rx_chain) &&
1415 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
Tomas Winklere1623442009-01-27 14:27:56 -08001416 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001417 return 0;
1418 }
1419
Johannes Berg246ed352010-08-23 10:46:32 +02001420 rxon_assoc.flags = ctx->staging.flags;
1421 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1422 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1423 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001424 rxon_assoc.reserved = 0;
1425 rxon_assoc.ofdm_ht_single_stream_basic_rates =
Johannes Berg246ed352010-08-23 10:46:32 +02001426 ctx->staging.ofdm_ht_single_stream_basic_rates;
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001427 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
Johannes Berg246ed352010-08-23 10:46:32 +02001428 ctx->staging.ofdm_ht_dual_stream_basic_rates;
1429 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
Tomas Winkler7e8c5192008-04-15 16:01:43 -07001430
1431 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1432 sizeof(rxon_assoc), &rxon_assoc, NULL);
 1433	if (ret)
 1434		return ret;
 1435
 1436	return 0;
1437}
1438
Johannes Berg2295c662010-10-23 09:15:41 -07001439static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1440{
1441 /* cast away the const for active_rxon in this function */
1442 struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
1443 int ret;
1444 bool new_assoc =
1445 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
1446
1447 if (!iwl_is_alive(priv))
1448 return -EBUSY;
1449
1450 if (!ctx->is_active)
1451 return 0;
1452
1453 /* always get timestamp with Rx frame */
1454 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
1455
1456 ret = iwl_check_rxon_cmd(priv, ctx);
1457 if (ret) {
1458 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1459 return -EINVAL;
1460 }
1461
1462 /*
 1463	 * we have received a commit_rxon request;
 1464	 * abort any previous channel switch still in progress
1465 */
1466 if (priv->switch_rxon.switch_in_progress &&
1467 (priv->switch_rxon.channel != ctx->staging.channel)) {
1468 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
1469 le16_to_cpu(priv->switch_rxon.channel));
1470 iwl_chswitch_done(priv, false);
1471 }
1472
1473 /* If we don't need to send a full RXON, we can use
1474 * iwl_rxon_assoc_cmd which is used to reconfigure filter
1475 * and other flags for the current radio configuration. */
1476 if (!iwl_full_rxon_required(priv, ctx)) {
1477 ret = iwl_send_rxon_assoc(priv, ctx);
1478 if (ret) {
1479 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
1480 return ret;
1481 }
1482
1483 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1484 iwl_print_rx_config_cmd(priv, ctx);
1485 return 0;
1486 }
1487
 1488	/* If we are currently associated and the new config requires
 1489	 * an RXON_ASSOC and also wants the ASSOC filter mask enabled,
 1490	 * we must first clear the association from the active configuration
 1491	 * before we apply the new config */
1492 if (iwl_is_associated_ctx(ctx) && new_assoc) {
1493 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1494 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1495
1496 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
1497 sizeof(struct iwl_rxon_cmd),
1498 active_rxon);
1499
1500 /* If the mask clearing failed then we set
1501 * active_rxon back to what it was previously */
1502 if (ret) {
1503 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1504 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
1505 return ret;
1506 }
1507 iwl_clear_ucode_stations(priv, ctx);
1508 iwl_restore_stations(priv, ctx);
1509 ret = iwl_restore_default_wep_keys(priv, ctx);
1510 if (ret) {
1511 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1512 return ret;
1513 }
1514 }
1515
1516 IWL_DEBUG_INFO(priv, "Sending RXON\n"
1517 "* with%s RXON_FILTER_ASSOC_MSK\n"
1518 "* channel = %d\n"
1519 "* bssid = %pM\n",
1520 (new_assoc ? "" : "out"),
1521 le16_to_cpu(ctx->staging.channel),
1522 ctx->staging.bssid_addr);
1523
1524 iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
1525
1526 /* Apply the new configuration
1527 * RXON unassoc clears the station table in uCode so restoration of
1528 * stations is needed after it (the RXON command) completes
1529 */
1530 if (!new_assoc) {
1531 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
1532 sizeof(struct iwl_rxon_cmd), &ctx->staging);
1533 if (ret) {
1534 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1535 return ret;
1536 }
1537 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
1538 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1539 iwl_clear_ucode_stations(priv, ctx);
1540 iwl_restore_stations(priv, ctx);
1541 ret = iwl_restore_default_wep_keys(priv, ctx);
1542 if (ret) {
1543 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1544 return ret;
1545 }
1546 }
1547 if (new_assoc) {
1548 priv->start_calib = 0;
1549 /* Apply the new configuration
 1550	 * RXON assoc doesn't clear the station table in uCode,
 1551	 * so no station restore is needed afterwards. */
1552 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
1553 sizeof(struct iwl_rxon_cmd), &ctx->staging);
1554 if (ret) {
1555 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1556 return ret;
1557 }
1558 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1559 }
1560 iwl_print_rx_config_cmd(priv, ctx);
1561
1562 iwl_init_sensitivity(priv);
1563
 1564	/* If we issue a new RXON command which requires a tune, then we must
 1565	 * send a new TXPOWER command or we won't be able to Tx any frames */
Stanislaw Gruszkaf844a702011-01-28 16:47:44 +01001566 ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
Johannes Berg2295c662010-10-23 09:15:41 -07001567 if (ret) {
1568 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
1569 return ret;
1570 }
1571
1572 return 0;
1573}
1574
Wey-Yi Guy79d07322010-05-06 08:54:11 -07001575static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1576 struct ieee80211_channel_switch *ch_switch)
Zhu Yib481de92007-09-25 17:54:57 -07001577{
Johannes Berg246ed352010-08-23 10:46:32 +02001578 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
Zhu Yib481de92007-09-25 17:54:57 -07001579 int rc;
1580 u8 band = 0;
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001581 bool is_ht40 = false;
Zhu Yib481de92007-09-25 17:54:57 -07001582 u8 ctrl_chan_high = 0;
Wey-Yi Guy4a56e962009-10-23 13:42:29 -07001583 struct iwl4965_channel_switch_cmd cmd;
Assaf Kraussbf85ea42008-03-14 10:38:49 -07001584 const struct iwl_channel_info *ch_info;
Wey-Yi Guy79d07322010-05-06 08:54:11 -07001585 u32 switch_time_in_usec, ucode_switch_time;
1586 u16 ch;
1587 u32 tsf_low;
1588 u8 switch_count;
Johannes Berg246ed352010-08-23 10:46:32 +02001589 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
Johannes Berg8bd413e2010-08-23 10:46:40 +02001590 struct ieee80211_vif *vif = ctx->vif;
Johannes Berg8318d782008-01-24 19:38:38 +01001591 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07001592
Johannes Berg246ed352010-08-23 10:46:32 +02001593 is_ht40 = is_ht40_channel(ctx->staging.flags);
Zhu Yib481de92007-09-25 17:54:57 -07001594
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001595 if (is_ht40 &&
Johannes Berg246ed352010-08-23 10:46:32 +02001596 (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
Zhu Yib481de92007-09-25 17:54:57 -07001597 ctrl_chan_high = 1;
1598
1599 cmd.band = band;
1600 cmd.expect_beacon = 0;
Shanyu Zhao81e95432010-07-28 13:40:27 -07001601 ch = ch_switch->channel->hw_value;
Wey-Yi Guy79d07322010-05-06 08:54:11 -07001602 cmd.channel = cpu_to_le16(ch);
Johannes Berg246ed352010-08-23 10:46:32 +02001603 cmd.rxon_flags = ctx->staging.flags;
1604 cmd.rxon_filter_flags = ctx->staging.filter_flags;
Wey-Yi Guy79d07322010-05-06 08:54:11 -07001605 switch_count = ch_switch->count;
1606 tsf_low = ch_switch->timestamp & 0x0ffffffff;
1607 /*
1608 * calculate the ucode channel switch time
 1609	 * taking the TSF into account as one of the factors for when to switch
1610 */
1611 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
1612 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
1613 beacon_interval)) {
1614 switch_count -= (priv->ucode_beacon_time -
1615 tsf_low) / beacon_interval;
1616 } else
1617 switch_count = 0;
1618 }
1619 if (switch_count <= 1)
1620 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
1621 else {
1622 switch_time_in_usec =
1623 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
1624 ucode_switch_time = iwl_usecs_to_beacons(priv,
1625 switch_time_in_usec,
1626 beacon_interval);
1627 cmd.switch_time = iwl_add_beacon_time(priv,
1628 priv->ucode_beacon_time,
1629 ucode_switch_time,
1630 beacon_interval);
1631 }
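	/*
	 * cmd.switch_time now holds the target switch time in the device's
	 * beacon-time format: the next uCode beacon time when switch_count
	 * is 0 or 1, otherwise roughly switch_count beacon intervals from
	 * now (intervals already elapsed, per the TSF check above, have been
	 * subtracted out).
	 */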
1632 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
1633 cmd.switch_time);
1634 ch_info = iwl_get_channel_info(priv, priv->band, ch);
Zhu Yib481de92007-09-25 17:54:57 -07001635 if (ch_info)
1636 cmd.expect_beacon = is_channel_radar(ch_info);
Wey-Yi Guy4a56e962009-10-23 13:42:29 -07001637 else {
1638 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
Johannes Berg246ed352010-08-23 10:46:32 +02001639 ctx->active.channel, ch);
Wey-Yi Guy4a56e962009-10-23 13:42:29 -07001640 return -EFAULT;
1641 }
Zhu Yib481de92007-09-25 17:54:57 -07001642
Wey-Yi Guy79d07322010-05-06 08:54:11 -07001643 rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
Zhu Yib481de92007-09-25 17:54:57 -07001644 ctrl_chan_high, &cmd.tx_power);
1645 if (rc) {
Tomas Winklere1623442009-01-27 14:27:56 -08001646 IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
Zhu Yib481de92007-09-25 17:54:57 -07001647 return rc;
1648 }
1649
Wey-Yi Guy79d07322010-05-06 08:54:11 -07001650 priv->switch_rxon.channel = cmd.channel;
Wey-Yi Guy0924e5192009-11-06 14:52:54 -08001651 priv->switch_rxon.switch_in_progress = true;
1652
1653 return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
Zhu Yib481de92007-09-25 17:54:57 -07001654}
1655
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001656/**
Tomas Winklere2a722e2008-04-14 21:16:10 -07001657 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001658 */
Tomas Winklere2a722e2008-04-14 21:16:10 -07001659static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
Ron Rindjunsky16466902008-05-05 10:22:50 +08001660 struct iwl_tx_queue *txq,
Tomas Winklere2a722e2008-04-14 21:16:10 -07001661 u16 byte_cnt)
Zhu Yib481de92007-09-25 17:54:57 -07001662{
Tomas Winkler4ddbb7d2008-11-07 09:58:40 -08001663 struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
Tomas Winkler127901a2008-10-23 23:48:55 -07001664 int txq_id = txq->q.id;
1665 int write_ptr = txq->q.write_ptr;
1666 int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1667 __le16 bc_ent;
Zhu Yib481de92007-09-25 17:54:57 -07001668
Tomas Winkler127901a2008-10-23 23:48:55 -07001669 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
Zhu Yib481de92007-09-25 17:54:57 -07001670
Tomas Winkler127901a2008-10-23 23:48:55 -07001671 bc_ent = cpu_to_le16(len & 0xFFF);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001672 /* Set up byte count within first 256 entries */
Tomas Winkler4ddbb7d2008-11-07 09:58:40 -08001673 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
Zhu Yib481de92007-09-25 17:54:57 -07001674
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001675 /* If within first 64 entries, duplicate at end */
Tomas Winkler127901a2008-10-23 23:48:55 -07001676 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
Tomas Winkler4ddbb7d2008-11-07 09:58:40 -08001677 scd_bc_tbl[txq_id].
Tomas Winkler127901a2008-10-23 23:48:55 -07001678 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
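	/* The duplicated window presumably lets the scheduler read byte
	 * counts contiguously across the read-pointer wrap at
	 * TFD_QUEUE_SIZE_MAX (intent sketched here, not verified against
	 * uCode internals). */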
Zhu Yib481de92007-09-25 17:54:57 -07001679}
1680
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001681/**
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001682 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
Zhu Yib481de92007-09-25 17:54:57 -07001683 * @priv: driver data; the temperature reading comes from the uCode statistics
1684 *
1685 * A return of <0 indicates bogus data in the statistics
1686 */
Reinette Chatre3d816c72009-08-07 15:41:37 -07001687static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001688{
1689 s32 temperature;
1690 s32 vt;
1691 s32 R1, R2, R3;
1692 u32 R4;
1693
1694 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
Wey-Yi Guyf3aebee2010-06-14 17:09:54 -07001695 (priv->_agn.statistics.flag &
1696 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07001697 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
Zhu Yib481de92007-09-25 17:54:57 -07001698 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1699 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1700 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1701 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
1702 } else {
Tomas Winklere1623442009-01-27 14:27:56 -08001703 IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
Zhu Yib481de92007-09-25 17:54:57 -07001704 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1705 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1706 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1707 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
1708 }
1709
1710 /*
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001711 * Temperature is only 23 bits, so sign extend out to 32.
Zhu Yib481de92007-09-25 17:54:57 -07001712 *
1713 * NOTE If we haven't received a statistics notification yet
1714 * with an updated temperature, use R4 provided to us in the
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001715 * "initialize" ALIVE response.
1716 */
Zhu Yib481de92007-09-25 17:54:57 -07001717 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
Andreas Herrmann7919a572010-08-30 19:04:01 +00001718 vt = sign_extend32(R4, 23);
Zhu Yib481de92007-09-25 17:54:57 -07001719 else
Andreas Herrmann7919a572010-08-30 19:04:01 +00001720 vt = sign_extend32(le32_to_cpu(priv->_agn.statistics.
Wey-Yi Guy325322e2010-07-14 08:07:27 -07001721 general.common.temperature), 23);
Zhu Yib481de92007-09-25 17:54:57 -07001722
Tomas Winklere1623442009-01-27 14:27:56 -08001723 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
Zhu Yib481de92007-09-25 17:54:57 -07001724
1725 if (R3 == R1) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08001726 IWL_ERR(priv, "Calibration conflict R1 == R3\n");
Zhu Yib481de92007-09-25 17:54:57 -07001727 return -1;
1728 }
1729
 1730	/* Calculate temperature in kelvin, scaled by 97%.
 1731	 * Add offset to center the adjustment around 0 degrees Celsius. */
1732 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
1733 temperature /= (R3 - R1);
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001734 temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
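	/*
	 * In closed form (ignoring integer truncation):
	 *   T[K] = 0.97 * TEMPERATURE_CALIB_A_VAL * (vt - R2) / (R3 - R1)
	 *          + TEMPERATURE_CALIB_KELVIN_OFFSET
	 */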
Zhu Yib481de92007-09-25 17:54:57 -07001735
Tomas Winklere1623442009-01-27 14:27:56 -08001736 IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001737 temperature, KELVIN_TO_CELSIUS(temperature));
Zhu Yib481de92007-09-25 17:54:57 -07001738
1739 return temperature;
1740}
1741
1742/* Adjust Txpower only if temperature variance is greater than threshold. */
1743#define IWL_TEMPERATURE_THRESHOLD 3
1744
1745/**
1746 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
1747 *
 1748 * If the temperature has changed sufficiently, then a recalibration
1749 * is needed.
1750 *
1751 * Assumes caller will replace priv->last_temperature once calibration
1752 * executed.
1753 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001754static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001755{
1756 int temp_diff;
1757
1758 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
Tomas Winklere1623442009-01-27 14:27:56 -08001759 IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
Zhu Yib481de92007-09-25 17:54:57 -07001760 return 0;
1761 }
1762
1763 temp_diff = priv->temperature - priv->last_temperature;
1764
1765 /* get absolute value */
1766 if (temp_diff < 0) {
Frans Pop91dd6c22010-03-24 14:19:58 -07001767 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
Zhu Yib481de92007-09-25 17:54:57 -07001768 temp_diff = -temp_diff;
1769 } else if (temp_diff == 0)
Frans Pop91dd6c22010-03-24 14:19:58 -07001770 IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
Zhu Yib481de92007-09-25 17:54:57 -07001771 else
Frans Pop91dd6c22010-03-24 14:19:58 -07001772 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
Zhu Yib481de92007-09-25 17:54:57 -07001773
1774 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
Frans Pop91dd6c22010-03-24 14:19:58 -07001775 IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
Zhu Yib481de92007-09-25 17:54:57 -07001776 return 0;
1777 }
1778
Frans Pop91dd6c22010-03-24 14:19:58 -07001779 IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
Zhu Yib481de92007-09-25 17:54:57 -07001780
1781 return 1;
1782}
1783
Zhu Yi52256402008-06-30 17:23:31 +08001784static void iwl4965_temperature_calib(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001785{
Zhu Yib481de92007-09-25 17:54:57 -07001786 s32 temp;
Zhu Yib481de92007-09-25 17:54:57 -07001787
Emmanuel Grumbach91dbc5b2008-06-12 09:47:14 +08001788 temp = iwl4965_hw_get_temperature(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001789 if (temp < 0)
1790 return;
1791
1792 if (priv->temperature != temp) {
1793 if (priv->temperature)
Tomas Winklere1623442009-01-27 14:27:56 -08001794 IWL_DEBUG_TEMP(priv, "Temperature changed "
Zhu Yib481de92007-09-25 17:54:57 -07001795 "from %dC to %dC\n",
1796 KELVIN_TO_CELSIUS(priv->temperature),
1797 KELVIN_TO_CELSIUS(temp));
1798 else
Tomas Winklere1623442009-01-27 14:27:56 -08001799 IWL_DEBUG_TEMP(priv, "Temperature "
Zhu Yib481de92007-09-25 17:54:57 -07001800 "initialized to %dC\n",
1801 KELVIN_TO_CELSIUS(temp));
1802 }
1803
1804 priv->temperature = temp;
Wey-Yi Guy39b73fb2009-07-24 11:13:02 -07001805 iwl_tt_handler(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001806 set_bit(STATUS_TEMPERATURE, &priv->status);
1807
Emmanuel Grumbach203566f2008-06-12 09:46:54 +08001808 if (!priv->disable_tx_power_cal &&
1809 unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
1810 iwl4965_is_temp_calib_needed(priv))
Zhu Yib481de92007-09-25 17:54:57 -07001811 queue_work(priv->workqueue, &priv->txpower_work);
1812}
1813
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001814/**
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001815 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
1816 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001817static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001818 u16 txq_id)
1819{
1820 /* Simply stop the queue, but don't change any configuration;
1821 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001822 iwl_write_prph(priv,
Tomas Winkler12a81f62008-04-03 16:05:20 -07001823 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001824 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
1825 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001826}
1827
1828/**
Ron Rindjunsky7f3e4bb2008-06-12 09:46:55 +08001829 * txq_id must be at least IWL49_FIRST_AMPDU_QUEUE
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08001830 * priv->lock must be held by the caller
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001831 */
Tomas Winkler30e553e2008-05-29 16:35:16 +08001832static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1833 u16 ssn_idx, u8 tx_fifo)
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001834{
Tomas Winkler9f17b312008-07-11 11:53:35 +08001835 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
Wey-Yi Guy7cb1b082010-10-06 08:10:00 -07001836 (IWL49_FIRST_AMPDU_QUEUE +
1837 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
Winkler, Tomas39aadf82008-12-19 10:37:32 +08001838 IWL_WARN(priv,
1839 "queue number out of range: %d, must be %d to %d\n",
Tomas Winkler9f17b312008-07-11 11:53:35 +08001840 txq_id, IWL49_FIRST_AMPDU_QUEUE,
Wey-Yi Guy88804e22009-10-09 13:20:28 -07001841 IWL49_FIRST_AMPDU_QUEUE +
Wey-Yi Guy7cb1b082010-10-06 08:10:00 -07001842 priv->cfg->base_params->num_of_ampdu_queues - 1);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001843 return -EINVAL;
1844 }
1845
1846 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1847
Tomas Winkler12a81f62008-04-03 16:05:20 -07001848 iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001849
1850 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1851 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
 1852	/* assumes that ssn_idx is valid (!= 0xFFF) */
1853 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
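	/* With read_ptr == write_ptr the queue is left empty at ssn_idx
	 * when it is deactivated below. */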
1854
Tomas Winkler12a81f62008-04-03 16:05:20 -07001855 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Ron Rindjunsky36470742008-05-15 13:54:10 +08001856 iwl_txq_ctx_deactivate(priv, txq_id);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001857 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
1858
1859 return 0;
1860}
1861
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001862/**
1863 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
1864 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001865static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
Zhu Yib481de92007-09-25 17:54:57 -07001866 u16 txq_id)
1867{
1868 u32 tbl_dw_addr;
1869 u32 tbl_dw;
1870 u16 scd_q2ratid;
1871
Tomas Winkler30e553e2008-05-29 16:35:16 +08001872 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
Zhu Yib481de92007-09-25 17:54:57 -07001873
1874 tbl_dw_addr = priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001875 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
Zhu Yib481de92007-09-25 17:54:57 -07001876
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001877 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
Zhu Yib481de92007-09-25 17:54:57 -07001878
1879 if (txq_id & 0x1)
1880 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1881 else
1882 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
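	/* Two queues share each 32-bit translation-table word: odd txq_ids
	 * occupy the upper halfword, even txq_ids the lower. */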
1883
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001884 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
Zhu Yib481de92007-09-25 17:54:57 -07001885
1886 return 0;
1887}
1888
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02001889
Zhu Yib481de92007-09-25 17:54:57 -07001890/**
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001891 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
1892 *
Ron Rindjunsky7f3e4bb2008-06-12 09:46:55 +08001893 * NOTE: txq_id must be at least IWL49_FIRST_AMPDU_QUEUE,
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001894 * i.e. it must be one of the higher queues used for aggregation
Zhu Yib481de92007-09-25 17:54:57 -07001895 */
Tomas Winkler30e553e2008-05-29 16:35:16 +08001896static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1897 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
Zhu Yib481de92007-09-25 17:54:57 -07001898{
1899 unsigned long flags;
Zhu Yib481de92007-09-25 17:54:57 -07001900 u16 ra_tid;
Johannes Berg4620fef2010-06-16 03:30:27 -07001901 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001902
Tomas Winkler9f17b312008-07-11 11:53:35 +08001903 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
Wey-Yi Guy7cb1b082010-10-06 08:10:00 -07001904 (IWL49_FIRST_AMPDU_QUEUE +
1905 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
Winkler, Tomas39aadf82008-12-19 10:37:32 +08001906 IWL_WARN(priv,
1907 "queue number out of range: %d, must be %d to %d\n",
Tomas Winkler9f17b312008-07-11 11:53:35 +08001908 txq_id, IWL49_FIRST_AMPDU_QUEUE,
Wey-Yi Guy88804e22009-10-09 13:20:28 -07001909 IWL49_FIRST_AMPDU_QUEUE +
Wey-Yi Guy7cb1b082010-10-06 08:10:00 -07001910 priv->cfg->base_params->num_of_ampdu_queues - 1);
Tomas Winkler9f17b312008-07-11 11:53:35 +08001911 return -EINVAL;
1912 }
Zhu Yib481de92007-09-25 17:54:57 -07001913
1914 ra_tid = BUILD_RAxTID(sta_id, tid);
1915
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001916 /* Modify device's station table to Tx this TID */
Johannes Berg4620fef2010-06-16 03:30:27 -07001917 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
1918 if (ret)
1919 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001920
1921 spin_lock_irqsave(&priv->lock, flags);
Zhu Yib481de92007-09-25 17:54:57 -07001922
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001923 /* Stop this Tx queue before configuring it */
Zhu Yib481de92007-09-25 17:54:57 -07001924 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
1925
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001926 /* Map receiver-address / traffic-ID to this queue */
Zhu Yib481de92007-09-25 17:54:57 -07001927 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
1928
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001929 /* Set this queue as a chain-building queue */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001930 iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07001931
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001932 /* Place first TFD at index corresponding to start sequence number.
1933 * Assumes that ssn_idx is valid (!= 0xFFF) */
Tomas Winklerfc4b6852007-10-25 17:15:24 +08001934 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
1935 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
Zhu Yib481de92007-09-25 17:54:57 -07001936 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
1937
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001938 /* Set up Tx window size and frame limit for this queue */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001939 iwl_write_targ_mem(priv,
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001940 priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
1941 (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1942 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
Zhu Yib481de92007-09-25 17:54:57 -07001943
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001944 iwl_write_targ_mem(priv, priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07001945 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1946 (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
1947 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
Zhu Yib481de92007-09-25 17:54:57 -07001948
Tomas Winkler12a81f62008-04-03 16:05:20 -07001949 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07001950
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001951 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
Zhu Yib481de92007-09-25 17:54:57 -07001952 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
1953
Zhu Yib481de92007-09-25 17:54:57 -07001954 spin_unlock_irqrestore(&priv->lock, flags);
1955
1956 return 0;
1957}
1958
Tomas Winkler133636d2008-05-05 10:22:34 +08001959
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08001960static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1961{
1962 switch (cmd_id) {
1963 case REPLY_RXON:
1964 return (u16) sizeof(struct iwl4965_rxon_cmd);
1965 default:
1966 return len;
1967 }
1968}
1969
Tomas Winkler133636d2008-05-05 10:22:34 +08001970static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
1971{
1972 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
1973 addsta->mode = cmd->mode;
1974 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
1975 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
1976 addsta->station_flags = cmd->station_flags;
1977 addsta->station_flags_msk = cmd->station_flags_msk;
1978 addsta->tid_disable_tx = cmd->tid_disable_tx;
1979 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
1980 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
1981 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
Johannes Berg9bb487b2009-11-13 11:56:36 -08001982 addsta->sleep_tx_count = cmd->sleep_tx_count;
Harvey Harrisonc1b4aa32009-01-29 13:26:44 -08001983 addsta->reserved1 = cpu_to_le16(0);
Wey-Yi Guy62624082009-11-20 12:05:01 -08001984 addsta->reserved2 = cpu_to_le16(0);
Tomas Winkler133636d2008-05-05 10:22:34 +08001985
1986 return (u16)sizeof(struct iwl4965_addsta_cmd);
1987}
Tomas Winklerf20217d2008-05-29 16:35:10 +08001988
Tomas Winklerf20217d2008-05-29 16:35:10 +08001989static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
1990{
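	/* The scheduler SSN is the __le32 that immediately follows the
	 * frame_count per-frame status entries in the Tx response. */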
Tomas Winkler25a65722008-06-12 09:47:07 +08001991 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
Tomas Winklerf20217d2008-05-29 16:35:10 +08001992}
1993
1994/**
Tomas Winklera96a27f2008-10-23 23:48:56 -07001995 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
Tomas Winklerf20217d2008-05-29 16:35:10 +08001996 */
1997static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
1998 struct iwl_ht_agg *agg,
Tomas Winkler25a65722008-06-12 09:47:07 +08001999 struct iwl4965_tx_resp *tx_resp,
2000 int txq_id, u16 start_idx)
Tomas Winklerf20217d2008-05-29 16:35:10 +08002001{
2002 u16 status;
Tomas Winkler25a65722008-06-12 09:47:07 +08002003 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002004 struct ieee80211_tx_info *info = NULL;
2005 struct ieee80211_hdr *hdr = NULL;
Tomas Winklere7d326a2008-06-12 09:47:11 +08002006 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
Tomas Winkler25a65722008-06-12 09:47:07 +08002007 int i, sh, idx;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002008 u16 seq;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002009 if (agg->wait_for_ba)
Tomas Winklere1623442009-01-27 14:27:56 -08002010 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
Tomas Winklerf20217d2008-05-29 16:35:10 +08002011
2012 agg->frame_count = tx_resp->frame_count;
2013 agg->start_idx = start_idx;
Tomas Winklere7d326a2008-06-12 09:47:11 +08002014 agg->rate_n_flags = rate_n_flags;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002015 agg->bitmap = 0;
2016
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002017 /* num frames attempted by Tx command */
Tomas Winklerf20217d2008-05-29 16:35:10 +08002018 if (agg->frame_count == 1) {
2019 /* Only one frame was attempted; no block-ack will arrive */
2020 status = le16_to_cpu(frame_status[0].status);
Tomas Winkler25a65722008-06-12 09:47:07 +08002021 idx = start_idx;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002022
2023 /* FIXME: code repetition */
Tomas Winklere1623442009-01-27 14:27:56 -08002024 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
Tomas Winklerf20217d2008-05-29 16:35:10 +08002025 agg->frame_count, agg->start_idx, idx);
2026
Johannes Bergff0d91c2010-05-17 02:37:34 -07002027 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
Johannes Berge6a98542008-10-21 12:40:02 +02002028 info->status.rates[0].count = tx_resp->failure_frame + 1;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002029 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
Johannes Bergc397bf12009-11-13 11:56:35 -08002030 info->flags |= iwl_tx_status_to_mac80211(status);
Wey-Yi Guy8d801082010-03-17 13:34:36 -07002031 iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002032 /* FIXME: code repetition end */
2033
Tomas Winklere1623442009-01-27 14:27:56 -08002034 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
Tomas Winklerf20217d2008-05-29 16:35:10 +08002035 status & 0xff, tx_resp->failure_frame);
Tomas Winklere1623442009-01-27 14:27:56 -08002036 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002037
2038 agg->wait_for_ba = 0;
2039 } else {
2040 /* Two or more frames were attempted; expect block-ack */
2041 u64 bitmap = 0;
2042 int start = agg->start_idx;
2043
2044 /* Construct bit-map of pending frames within Tx window */
2045 for (i = 0; i < agg->frame_count; i++) {
2046 u16 sc;
2047 status = le16_to_cpu(frame_status[i].status);
2048 seq = le16_to_cpu(frame_status[i].sequence);
2049 idx = SEQ_TO_INDEX(seq);
2050 txq_id = SEQ_TO_QUEUE(seq);
2051
2052 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
2053 AGG_TX_STATE_ABORT_MSK))
2054 continue;
2055
Tomas Winklere1623442009-01-27 14:27:56 -08002056 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
Tomas Winklerf20217d2008-05-29 16:35:10 +08002057 agg->frame_count, txq_id, idx);
2058
2059 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
Stanislaw Gruszka6c6a22e2009-09-23 10:51:34 +02002060 if (!hdr) {
2061 IWL_ERR(priv,
2062 "BUG_ON idx doesn't point to valid skb"
2063 " idx=%d, txq_id=%d\n", idx, txq_id);
2064 return -1;
2065 }
Tomas Winklerf20217d2008-05-29 16:35:10 +08002066
2067 sc = le16_to_cpu(hdr->seq_ctrl);
2068 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08002069 IWL_ERR(priv,
2070 "BUG_ON idx doesn't match seq control"
2071 " idx=%d, seq_idx=%d, seq=%d\n",
2072 idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002073 return -1;
2074 }
2075
Tomas Winklere1623442009-01-27 14:27:56 -08002076 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
Tomas Winklerf20217d2008-05-29 16:35:10 +08002077 i, idx, SEQ_TO_SN(sc));
2078
2079 sh = idx - start;
2080 if (sh > 64) {
2081 sh = (start - idx) + 0xff;
2082 bitmap = bitmap << sh;
2083 sh = 0;
2084 start = idx;
2085 } else if (sh < -64)
2086 sh = 0xff - (start - idx);
2087 else if (sh < 0) {
2088 sh = start - idx;
2089 start = idx;
2090 bitmap = bitmap << sh;
2091 sh = 0;
2092 }
Emmanuel Grumbach4aa41f12008-07-18 13:53:09 +08002093 bitmap |= 1ULL << sh;
Tomas Winklere1623442009-01-27 14:27:56 -08002094 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
Emmanuel Grumbach4aa41f12008-07-18 13:53:09 +08002095 start, (unsigned long long)bitmap);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002096 }
2097
2098 agg->bitmap = bitmap;
2099 agg->start_idx = start;
Tomas Winklere1623442009-01-27 14:27:56 -08002100 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
Tomas Winklerf20217d2008-05-29 16:35:10 +08002101 agg->frame_count, agg->start_idx,
2102 (unsigned long long)agg->bitmap);
2103
2104 if (bitmap)
2105 agg->wait_for_ba = 1;
2106 }
2107 return 0;
2108}
Tomas Winklerf20217d2008-05-29 16:35:10 +08002109
Johannes Bergc1182742010-04-30 11:30:48 -07002110static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
2111{
2112 int i;
2113 int start = 0;
2114 int ret = IWL_INVALID_STATION;
2115 unsigned long flags;
2116
2117 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
2118 (priv->iw_mode == NL80211_IFTYPE_AP))
2119 start = IWL_STA_ID;
2120
2121 if (is_broadcast_ether_addr(addr))
Johannes Berga194e322010-08-27 08:53:46 -07002122 return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
Johannes Bergc1182742010-04-30 11:30:48 -07002123
2124 spin_lock_irqsave(&priv->sta_lock, flags);
2125 for (i = start; i < priv->hw_params.max_stations; i++)
2126 if (priv->stations[i].used &&
2127 (!compare_ether_addr(priv->stations[i].sta.sta.addr,
2128 addr))) {
2129 ret = i;
2130 goto out;
2131 }
2132
2133 IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
2134 addr, priv->num_stations);
2135
2136 out:
2137 /*
 2138	 * It is possible that more commands interacting with stations
 2139	 * arrive before we have finished processing the addition of a
 2140	 * station
2141 */
2142 if (ret != IWL_INVALID_STATION &&
2143 (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
2144 ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
2145 (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
2146 IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
2147 ret);
2148 ret = IWL_INVALID_STATION;
2149 }
2150 spin_unlock_irqrestore(&priv->sta_lock, flags);
2151 return ret;
2152}
2153
Johannes Berg93286db2010-04-29 04:43:03 -07002154static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2155{
2156 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
2157 return IWL_AP_ID;
2158 } else {
2159 u8 *da = ieee80211_get_DA(hdr);
2160 return iwl_find_station(priv, da);
2161 }
2162}
2163
Tomas Winklerf20217d2008-05-29 16:35:10 +08002164/**
2165 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
2166 */
2167static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2168 struct iwl_rx_mem_buffer *rxb)
2169{
Zhu Yi2f301222009-10-09 17:19:45 +08002170 struct iwl_rx_packet *pkt = rxb_addr(rxb);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002171 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2172 int txq_id = SEQ_TO_QUEUE(sequence);
2173 int index = SEQ_TO_INDEX(sequence);
2174 struct iwl_tx_queue *txq = &priv->txq[txq_id];
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002175 struct ieee80211_hdr *hdr;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002176 struct ieee80211_tx_info *info;
2177 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
Tomas Winkler25a65722008-06-12 09:47:07 +08002178 u32 status = le32_to_cpu(tx_resp->u.status);
Dan Carpenter39825f42010-01-09 11:41:48 +03002179 int uninitialized_var(tid);
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002180 int sta_id;
2181 int freed;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002182 u8 *qc = NULL;
Reinette Chatre9c5ac092010-05-05 02:26:06 -07002183 unsigned long flags;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002184
2185 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08002186 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
Tomas Winklerf20217d2008-05-29 16:35:10 +08002187 "is out of range [0-%d] %d %d\n", txq_id,
2188 index, txq->q.n_bd, txq->q.write_ptr,
2189 txq->q.read_ptr);
2190 return;
2191 }
2192
Stanislaw Gruszka22de94d2010-12-03 15:41:48 +01002193 txq->time_stamp = jiffies;
Johannes Bergff0d91c2010-05-17 02:37:34 -07002194 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002195 memset(&info->status, 0, sizeof(info->status));
2196
Tomas Winklerf20217d2008-05-29 16:35:10 +08002197 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002198 if (ieee80211_is_data_qos(hdr->frame_control)) {
Harvey Harrisonfd7c8a42008-06-11 14:21:56 -07002199 qc = ieee80211_get_qos_ctl(hdr);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002200 tid = qc[0] & 0xf;
2201 }
2202
2203 sta_id = iwl_get_ra_sta_id(priv, hdr);
2204 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
Winkler, Tomas15b16872008-12-19 10:37:33 +08002205 IWL_ERR(priv, "Station not known\n");
Tomas Winklerf20217d2008-05-29 16:35:10 +08002206 return;
2207 }
2208
Reinette Chatre9c5ac092010-05-05 02:26:06 -07002209 spin_lock_irqsave(&priv->sta_lock, flags);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002210 if (txq->sched_retry) {
2211 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
2212 struct iwl_ht_agg *agg = NULL;
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002213 WARN_ON(!qc);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002214
2215 agg = &priv->stations[sta_id].tid[tid].agg;
2216
Tomas Winkler25a65722008-06-12 09:47:07 +08002217 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002218
Ron Rindjunsky32354272008-07-01 10:44:51 +03002219 /* check if BAR is needed */
2220 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
2221 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
Tomas Winklerf20217d2008-05-29 16:35:10 +08002222
2223 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
Tomas Winklerf20217d2008-05-29 16:35:10 +08002224 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
Tomas Winklere1623442009-01-27 14:27:56 -08002225 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
Tomas Winklerf20217d2008-05-29 16:35:10 +08002226 "%d index %d\n", scd_ssn , index);
Wey-Yi Guy74bcdb32010-03-17 13:34:34 -07002227 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
Wey-Yi Guyece64442010-04-08 13:17:37 -07002228 if (qc)
2229 iwl_free_tfds_in_queue(priv, sta_id,
2230 tid, freed);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002231
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002232 if (priv->mac80211_registered &&
2233 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
Johannes Berg4bea9b92010-11-10 18:25:43 -08002234 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
Johannes Berg549a04e2010-11-10 18:25:44 -08002235 iwl_wake_queue(priv, txq);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002236 }
2237 } else {
Johannes Berge6a98542008-10-21 12:40:02 +02002238 info->status.rates[0].count = tx_resp->failure_frame + 1;
Johannes Bergc397bf12009-11-13 11:56:35 -08002239 info->flags |= iwl_tx_status_to_mac80211(status);
Wey-Yi Guy8d801082010-03-17 13:34:36 -07002240 iwlagn_hwrate_to_tx_control(priv,
Ron Rindjunsky4f85f5b2008-06-09 22:54:35 +03002241 le32_to_cpu(tx_resp->rate_n_flags),
2242 info);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002243
Tomas Winklere1623442009-01-27 14:27:56 -08002244 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002245 "rate_n_flags 0x%x retries %d\n",
2246 txq_id,
2247 iwl_get_tx_fail_reason(status), status,
2248 le32_to_cpu(tx_resp->rate_n_flags),
2249 tx_resp->failure_frame);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002250
Wey-Yi Guy74bcdb32010-03-17 13:34:34 -07002251 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
Wey-Yi Guyece64442010-04-08 13:17:37 -07002252 if (qc && likely(sta_id != IWL_INVALID_STATION))
2253 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
2254 else if (sta_id == IWL_INVALID_STATION)
2255 IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002256
2257 if (priv->mac80211_registered &&
2258 (iwl_queue_space(&txq->q) > txq->q.low_mark))
Johannes Berg549a04e2010-11-10 18:25:44 -08002259 iwl_wake_queue(priv, txq);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002260 }
Wey-Yi Guyece64442010-04-08 13:17:37 -07002261 if (qc && likely(sta_id != IWL_INVALID_STATION))
John W. Linville1805a342010-04-09 13:42:26 -04002262 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
Tomas Winkler3fd07a12008-10-23 23:48:49 -07002263
Wey-Yi Guy04569cb2010-03-31 17:57:28 -07002264 iwl_check_abort_status(priv, tx_resp->frame_count, status);
Reinette Chatre9c5ac092010-05-05 02:26:06 -07002265
2266 spin_unlock_irqrestore(&priv->sta_lock, flags);
Tomas Winklerf20217d2008-05-29 16:35:10 +08002267}
2268
Johannes Berg241887a2011-01-19 11:11:22 -08002269static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
2270 struct iwl_rx_mem_buffer *rxb)
2271{
2272 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2273 struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
2274#ifdef CONFIG_IWLWIFI_DEBUG
2275 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
2276
2277 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
2278 "tsf:0x%.8x%.8x rate:%d\n",
2279 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
2280 beacon->beacon_notify_hdr.failure_frame,
2281 le32_to_cpu(beacon->ibss_mgr_status),
2282 le32_to_cpu(beacon->high_tsf),
2283 le32_to_cpu(beacon->low_tsf), rate);
2284#endif
2285
2286 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
2287
2288 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
2289 queue_work(priv->workqueue, &priv->beacon_update);
2290}
2291
Tomas Winklercaab8f12008-08-04 16:00:42 +08002292static int iwl4965_calc_rssi(struct iwl_priv *priv,
2293 struct iwl_rx_phy_res *rx_resp)
2294{
2295 /* data from PHY/DSP regarding signal strength, etc.,
2296 * contents are always there, not configurable by host. */
2297 struct iwl4965_rx_non_cfg_phy *ncphy =
2298 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
2299 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
2300 >> IWL49_AGC_DB_POS;
2301
2302 u32 valid_antennae =
2303 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
2304 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
2305 u8 max_rssi = 0;
2306 u32 i;
2307
2308 /* Find max rssi among 3 possible receivers.
2309 * These values are measured by the digital signal processor (DSP).
2310 * They should stay fairly constant even as the signal strength varies,
2311 * if the radio's automatic gain control (AGC) is working right.
2312 * AGC value (see below) will provide the "interesting" info. */
2313 for (i = 0; i < 3; i++)
2314 if (valid_antennae & (1 << i))
2315 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
2316
Tomas Winklere1623442009-01-27 14:27:56 -08002317 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
Tomas Winklercaab8f12008-08-04 16:00:42 +08002318 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
2319 max_rssi, agc);
2320
2321 /* dBm = max_rssi dB - agc dB - constant.
2322 * Higher AGC (higher radio gain) means lower signal. */
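	/* Illustrative (hypothetical values): max_rssi = 97, agc = 23
	 * yields 74 - IWLAGN_RSSI_OFFSET dBm. */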
Wey-Yi Guyb744cb72010-03-23 11:37:59 -07002323 return max_rssi - agc - IWLAGN_RSSI_OFFSET;
Tomas Winklercaab8f12008-08-04 16:00:42 +08002324}
2325
Tomas Winklerf20217d2008-05-29 16:35:10 +08002326
Zhu Yib481de92007-09-25 17:54:57 -07002327/* Set up 4965-specific Rx frame reply handlers */
Emmanuel Grumbachd4789ef2008-04-24 11:55:20 -07002328static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002329{
2330 /* Legacy Rx frames */
Wey-Yi Guy8d801082010-03-17 13:34:36 -07002331 priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
Ron Rindjunsky37a44212008-05-29 16:35:18 +08002332 /* Tx response */
Tomas Winklerf20217d2008-05-29 16:35:10 +08002333 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
Johannes Berg241887a2011-01-19 11:11:22 -08002334 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
Johannes Bergea674852011-01-04 16:22:03 -08002335
2336 /* set up notification wait support */
2337 spin_lock_init(&priv->_agn.notif_wait_lock);
2338 INIT_LIST_HEAD(&priv->_agn.notif_waits);
2339 init_waitqueue_head(&priv->_agn.notif_waitq);
Zhu Yib481de92007-09-25 17:54:57 -07002340}
2341
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002342static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002343{
2344 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
Zhu Yib481de92007-09-25 17:54:57 -07002345}
2346
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002347static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002348{
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002349 cancel_work_sync(&priv->txpower_work);
Zhu Yib481de92007-09-25 17:54:57 -07002350}
2351
Tomas Winkler3c424c22008-04-15 16:01:42 -07002352static struct iwl_hcmd_ops iwl4965_hcmd = {
Tomas Winkler7e8c5192008-04-15 16:01:43 -07002353 .rxon_assoc = iwl4965_send_rxon_assoc,
Johannes Berg2295c662010-10-23 09:15:41 -07002354 .commit_rxon = iwl4965_commit_rxon,
Johannes Berg5de33062010-09-22 18:01:58 +02002355 .set_rxon_chain = iwlagn_set_rxon_chain,
Johannes Berg65b52bd2010-04-13 01:04:31 -07002356 .send_bt_config = iwl_send_bt_config,
Tomas Winkler3c424c22008-04-15 16:01:42 -07002357};
2358
Johannes Berga77029e2010-09-22 18:01:56 +02002359static void iwl4965_post_scan(struct iwl_priv *priv)
2360{
2361 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2362
2363 /*
2364 * Since setting the RXON may have been deferred while
2365 * performing the scan, fire one off if needed
2366 */
2367 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2368 iwlcore_commit_rxon(priv, ctx);
2369}
2370
Johannes Berg2295c662010-10-23 09:15:41 -07002371static void iwl4965_post_associate(struct iwl_priv *priv)
2372{
2373 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2374 struct ieee80211_vif *vif = ctx->vif;
2375 struct ieee80211_conf *conf = NULL;
2376 int ret = 0;
2377
2378 if (!vif || !priv->is_open)
2379 return;
2380
2381 if (vif->type == NL80211_IFTYPE_AP) {
2382 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
2383 return;
2384 }
2385
2386 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2387 return;
2388
2389 iwl_scan_cancel_timeout(priv, 200);
2390
2391 conf = ieee80211_get_hw_conf(priv->hw);
2392
2393 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2394 iwlcore_commit_rxon(priv, ctx);
2395
2396 ret = iwl_send_rxon_timing(priv, ctx);
2397 if (ret)
2398 IWL_WARN(priv, "RXON timing - "
2399 "Attempting to continue.\n");
2400
2401 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2402
2403 iwl_set_rxon_ht(priv, &priv->current_ht_config);
2404
2405 if (priv->cfg->ops->hcmd->set_rxon_chain)
2406 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2407
2408 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
2409
2410 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
2411 vif->bss_conf.aid, vif->bss_conf.beacon_int);
2412
2413 if (vif->bss_conf.use_short_preamble)
2414 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2415 else
2416 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2417
2418 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
2419 if (vif->bss_conf.use_short_slot)
2420 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
2421 else
2422 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2423 }
2424
2425 iwlcore_commit_rxon(priv, ctx);
2426
2427 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2428 vif->bss_conf.aid, ctx->active.bssid_addr);
2429
2430 switch (vif->type) {
2431 case NL80211_IFTYPE_STATION:
2432 break;
2433 case NL80211_IFTYPE_ADHOC:
2434 iwlagn_send_beacon_cmd(priv);
2435 break;
2436 default:
2437 IWL_ERR(priv, "%s Should not be called in %d mode\n",
2438 __func__, vif->type);
2439 break;
2440 }
2441
 2442	/* the chain noise calibration will enable PM upon completion.
2443 * If chain noise has already been run, then we need to enable
2444 * power management here */
2445 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
2446 iwl_power_update_mode(priv, false);
2447
2448 /* Enable Rx differential gain and sensitivity calibrations */
2449 iwl_chain_noise_reset(priv);
2450 priv->start_calib = 1;
2451}
2452
2453static void iwl4965_config_ap(struct iwl_priv *priv)
2454{
2455 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2456 struct ieee80211_vif *vif = ctx->vif;
2457 int ret = 0;
2458
2459 lockdep_assert_held(&priv->mutex);
2460
2461 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2462 return;
2463
2464 /* The following should be done only at AP bring up */
2465 if (!iwl_is_associated_ctx(ctx)) {
2466
2467 /* RXON - unassoc (to set timing command) */
2468 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2469 iwlcore_commit_rxon(priv, ctx);
2470
2471 /* RXON Timing */
2472 ret = iwl_send_rxon_timing(priv, ctx);
2473 if (ret)
2474 IWL_WARN(priv, "RXON timing failed - "
2475 "Attempting to continue.\n");
2476
2477 /* AP has all antennas */
2478 priv->chain_noise_data.active_chains =
2479 priv->hw_params.valid_rx_ant;
2480 iwl_set_rxon_ht(priv, &priv->current_ht_config);
2481 if (priv->cfg->ops->hcmd->set_rxon_chain)
2482 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2483
2484 ctx->staging.assoc_id = 0;
2485
2486 if (vif->bss_conf.use_short_preamble)
2487 ctx->staging.flags |=
2488 RXON_FLG_SHORT_PREAMBLE_MSK;
2489 else
2490 ctx->staging.flags &=
2491 ~RXON_FLG_SHORT_PREAMBLE_MSK;
2492
2493 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
2494 if (vif->bss_conf.use_short_slot)
2495 ctx->staging.flags |=
2496 RXON_FLG_SHORT_SLOT_MSK;
2497 else
2498 ctx->staging.flags &=
2499 ~RXON_FLG_SHORT_SLOT_MSK;
2500 }
2501 /* need to send beacon cmd before committing assoc RXON! */
2502 iwlagn_send_beacon_cmd(priv);
2503 /* restore RXON assoc */
2504 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2505 iwlcore_commit_rxon(priv, ctx);
2506 }
2507 iwlagn_send_beacon_cmd(priv);
2508
2509 /* FIXME - we need to add code here to detect a totally new
2510 * configuration, reset the AP, unassoc, rxon timing, assoc,
2511 * clear sta table, add BCAST sta... */
2512}
2513
Tomas Winkler857485c2008-03-21 13:53:44 -07002514static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
Gregory Greenmanc1adf9f2008-05-15 13:53:59 +08002515 .get_hcmd_size = iwl4965_get_hcmd_size,
Tomas Winkler133636d2008-05-05 10:22:34 +08002516 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -07002517 .chain_noise_reset = iwl4965_chain_noise_reset,
2518 .gain_computation = iwl4965_gain_computation,
Wey-Yi Guy708068d2010-11-10 09:56:41 -08002519 .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
Tomas Winklercaab8f12008-08-04 16:00:42 +08002520 .calc_rssi = iwl4965_calc_rssi,
Johannes Bergb6e4c552010-04-06 04:12:42 -07002521 .request_scan = iwlagn_request_scan,
Johannes Berga77029e2010-09-22 18:01:56 +02002522 .post_scan = iwl4965_post_scan,
Tomas Winkler857485c2008-03-21 13:53:44 -07002523};
2524
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002525static struct iwl_lib_ops iwl4965_lib = {
Tomas Winkler5425e492008-04-15 16:01:38 -07002526 .set_hw_params = iwl4965_hw_set_hw_params,
Tomas Winklere2a722e2008-04-14 21:16:10 -07002527 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
Tomas Winklerda1bc452008-05-29 16:35:00 +08002528 .txq_set_sched = iwl4965_txq_set_sched,
Tomas Winkler30e553e2008-05-29 16:35:16 +08002529 .txq_agg_enable = iwl4965_txq_agg_enable,
2530 .txq_agg_disable = iwl4965_txq_agg_disable,
Samuel Ortiz7aaa1d72009-01-19 15:30:26 -08002531 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
2532 .txq_free_tfd = iwl_hw_txq_free_tfd,
Samuel Ortiza8e74e22009-01-23 13:45:14 -08002533 .txq_init = iwl_hw_tx_queue_init,
Emmanuel Grumbachd4789ef2008-04-24 11:55:20 -07002534 .rx_handler_setup = iwl4965_rx_handler_setup,
Emmanuel Grumbach4e393172008-06-12 09:46:53 +08002535 .setup_deferred_work = iwl4965_setup_deferred_work,
2536 .cancel_deferred_work = iwl4965_cancel_deferred_work,
Tomas Winkler57aab752008-04-14 21:16:03 -07002537 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
2538 .alive_notify = iwl4965_alive_notify,
Emmanuel Grumbachf3ccc082008-05-05 10:22:45 +08002539 .init_alive_start = iwl4965_init_alive_start,
Tomas Winkler57aab752008-04-14 21:16:03 -07002540 .load_ucode = iwl4965_load_bsm,
Reinette Chatreb7a79402009-09-25 14:24:23 -07002541 .dump_nic_event_log = iwl_dump_nic_event_log,
2542 .dump_nic_error_log = iwl_dump_nic_error_log,
Ben Cahill647291f2010-03-02 12:48:25 -08002543 .dump_fh = iwl_dump_fh,
Wey-Yi Guy4a56e962009-10-23 13:42:29 -07002544 .set_channel_switch = iwl4965_hw_channel_switch,
Tomas Winkler6f4083a2008-04-16 16:34:49 -07002545 .apm_ops = {
Ben Cahillfadb3582009-10-23 13:42:21 -07002546 .init = iwl_apm_init,
Tomas Winkler694cc562008-04-24 11:55:22 -07002547 .config = iwl4965_nic_config,
Tomas Winkler6f4083a2008-04-16 16:34:49 -07002548 },
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002549 .eeprom_ops = {
Tomas Winkler073d3f52008-04-21 15:41:52 -07002550 .regulatory_bands = {
2551 EEPROM_REGULATORY_BAND_1_CHANNELS,
2552 EEPROM_REGULATORY_BAND_2_CHANNELS,
2553 EEPROM_REGULATORY_BAND_3_CHANNELS,
2554 EEPROM_REGULATORY_BAND_4_CHANNELS,
2555 EEPROM_REGULATORY_BAND_5_CHANNELS,
Wey-Yi Guy7aafef12009-08-07 15:41:38 -07002556 EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
2557 EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
Tomas Winkler073d3f52008-04-21 15:41:52 -07002558 },
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002559 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
2560 .release_semaphore = iwlcore_eeprom_release_semaphore,
Tomas Winkler0ef2ca62008-10-23 23:48:51 -07002561 .calib_version = iwl4965_eeprom_calib_version,
Tomas Winkler073d3f52008-04-21 15:41:52 -07002562 .query_addr = iwlcore_eeprom_query_addr,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002563 },
Tomas Winkler630fe9b2008-06-12 09:47:08 +08002564 .send_tx_power = iwl4965_send_tx_power,
Emmanuel Grumbach5b9f8cd2008-10-29 14:05:46 -07002565 .update_chain_flags = iwl_update_chain_flags,
Wey-Yi Guye39fdee2010-11-10 09:56:40 -08002566 .isr_ops = {
2567 .isr = iwl_isr_legacy,
2568 },
Wey-Yi Guy62161ae2009-05-21 13:44:23 -07002569 .temp_ops = {
2570 .temperature = iwl4965_temperature_calib,
Wey-Yi Guy62161ae2009-05-21 13:44:23 -07002571 },
Abhijeet Kolekarb8c76262010-04-08 15:29:07 -07002572 .debugfs_ops = {
2573 .rx_stats_read = iwl_ucode_rx_stats_read,
2574 .tx_stats_read = iwl_ucode_tx_stats_read,
2575 .general_stats_read = iwl_ucode_general_stats_read,
Wey-Yi Guyffb7d892010-07-14 08:09:55 -07002576 .bt_stats_read = iwl_ucode_bt_stats_read,
Wey-Yi Guy54a9aa652010-09-05 10:49:42 -07002577 .reply_tx_error = iwl_reply_tx_error_read,
Abhijeet Kolekarb8c76262010-04-08 15:29:07 -07002578 },
Wey-Yi Guyfa8f1302010-03-05 14:22:46 -08002579 .check_plcp_health = iwl_good_plcp_health,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002580};
2581
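/*
 * Handlers reached through the shared legacy mac80211 glue that the
 * 4965 still uses: post-association setup, AP configuration, IBSS
 * station management and broadcast-station updates.
 */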
Johannes Berg2295c662010-10-23 09:15:41 -07002582static const struct iwl_legacy_ops iwl4965_legacy_ops = {
2583 .post_associate = iwl4965_post_associate,
2584 .config_ap = iwl4965_config_ap,
2585 .manage_ibss_station = iwlagn_manage_ibss_station,
2586 .update_bcast_stations = iwl_update_bcast_stations,
2587};
2588
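/*
 * mac80211 callback table for the 4965.  It is registered elsewhere in
 * the driver's probe path; an illustrative (abridged, not verbatim)
 * sketch of how such a table reaches mac80211:
 *
 *	hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwl4965_hw_ops);
 *	...
 *	ret = ieee80211_register_hw(hw);
 *
 * After registration, mac80211 invokes .start/.stop, .tx, the interface
 * add/remove/change handlers and the rest of the callbacks listed below.
 */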
2589struct ieee80211_ops iwl4965_hw_ops = {
2590 .tx = iwlagn_mac_tx,
2591 .start = iwlagn_mac_start,
2592 .stop = iwlagn_mac_stop,
2593 .add_interface = iwl_mac_add_interface,
2594 .remove_interface = iwl_mac_remove_interface,
Johannes Bergd4daaea2010-10-23 09:15:43 -07002595 .change_interface = iwl_mac_change_interface,
Johannes Berg2295c662010-10-23 09:15:41 -07002596 .config = iwl_legacy_mac_config,
2597 .configure_filter = iwlagn_configure_filter,
2598 .set_key = iwlagn_mac_set_key,
2599 .update_tkip_key = iwlagn_mac_update_tkip_key,
2600 .conf_tx = iwl_mac_conf_tx,
2601 .reset_tsf = iwl_legacy_mac_reset_tsf,
2602 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
2603 .ampdu_action = iwlagn_mac_ampdu_action,
2604 .hw_scan = iwl_mac_hw_scan,
2605 .sta_add = iwlagn_mac_sta_add,
2606 .sta_remove = iwl_mac_sta_remove,
2607 .channel_switch = iwlagn_mac_channel_switch,
2608 .flush = iwlagn_mac_flush,
2609 .tx_last_beacon = iwl_mac_tx_last_beacon,
2610};
2611
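/*
 * Top-level ops bundle referenced from the iwl_cfg below; the shared
 * core code reaches all 4965-specific behaviour through these pointers
 * (lib, hcmd, hcmd utils, LED, legacy and mac80211 ops).
 */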
Emese Revfy45d5d802009-12-14 00:59:53 +01002612static const struct iwl_ops iwl4965_ops = {
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002613 .lib = &iwl4965_lib,
Tomas Winkler3c424c22008-04-15 16:01:42 -07002614 .hcmd = &iwl4965_hcmd,
Tomas Winkler857485c2008-03-21 13:53:44 -07002615 .utils = &iwl4965_hcmd_utils,
Johannes Berge932a602009-10-02 13:44:03 -07002616 .led = &iwlagn_led_ops,
Johannes Berg2295c662010-10-23 09:15:41 -07002617 .legacy = &iwl4965_legacy_ops,
2618 .ieee80211_ops = &iwl4965_hw_ops,
Assaf Krauss6bc913b2008-03-11 16:17:18 -07002619};
2620
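/*
 * Parameters common to every 4965 configuration: EEPROM image size,
 * Tx/AMPDU queue counts, use of the bootstrap state machine and the
 * legacy interrupt handler, the broken-powersave workaround,
 * driver-side sensitivity/chain-noise/Tx-power calibration, the
 * watchdog timeout and temperature reporting in Kelvin.
 */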
Wey-Yi Guy7cb1b082010-10-06 08:10:00 -07002621static struct iwl_base_params iwl4965_base_params = {
Tomas Winkler073d3f52008-04-21 15:41:52 -07002622 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
Wey-Yi Guy88804e22009-10-09 13:20:28 -07002623 .num_of_queues = IWL49_NUM_QUEUES,
2624 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
Ben Cahillfadb3582009-10-23 13:42:21 -07002625 .pll_cfg_val = 0,
2626 .set_l0s = true,
2627 .use_bsm = true,
Daniel C Halperinb2617932009-08-13 13:30:59 -07002628 .use_isr_legacy = true,
Johannes Berg96d8c6a2009-09-11 10:50:37 -07002629 .broken_powersave = true,
Wey-Yi Guyf2d0d0e2009-09-11 10:38:14 -07002630 .led_compensation = 61,
Wey-Yi Guyd8c07e72009-09-25 14:24:26 -07002631 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
Trieu 'Andrew' Nguyen3e4fb5f2010-01-22 14:22:46 -08002632 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
Stanislaw Gruszka22de94d2010-12-03 15:41:48 +01002633 .wd_timeout = IWL_DEF_WD_TIMEOUT,
Wey-Yi Guy2f3f7f92010-03-18 10:56:32 -07002634 .temperature_kelvin = true,
Wey-Yi Guy678b3852010-03-26 12:54:37 -07002635 .max_event_log_size = 512,
Wey-Yi Guy4e7033e2010-04-27 14:33:33 -07002636 .tx_power_by_driver = true,
Wey-Yi Guy6e5c8002010-04-27 14:00:28 -07002637 .ucode_tracing = true,
Wey-Yi Guy65d1f892010-04-25 15:41:43 -07002638 .sensitivity_calib_by_driver = true,
2639 .chain_noise_calib_by_driver = true,
Wey-Yi Guy8829c9e2010-11-10 11:05:38 -08002640 .no_agg_framecnt_info = true,
Wey-Yi Guy7cb1b082010-10-06 08:10:00 -07002641};
2642
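/*
 * The configuration the PCI probe code attaches to matching 4965
 * devices: firmware name prefix and supported API range, advertised
 * 802.11a/g/n SKUs, antenna layout (Tx on A/B, Rx on A/B/C), EEPROM
 * versions and the 5 GHz scan-antenna override.  It is typically
 * referenced from the driver's PCI device table through an entry along
 * the lines of (device ID shown for illustration only):
 *
 *	{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
 */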
2643struct iwl_cfg iwl4965_agn_cfg = {
2644 .name = "Intel(R) Wireless WiFi Link 4965AGN",
2645 .fw_name_pre = IWL4965_FW_PRE,
2646 .ucode_api_max = IWL4965_UCODE_API_MAX,
2647 .ucode_api_min = IWL4965_UCODE_API_MIN,
Wey-Yi Guy239712e2011-01-20 08:08:04 -08002648 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
Stanislaw Gruszka61790c52010-11-30 15:33:40 +01002649 .valid_tx_ant = ANT_AB,
2650 .valid_rx_ant = ANT_ABC,
Wey-Yi Guy7cb1b082010-10-06 08:10:00 -07002651 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2652 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
2653 .ops = &iwl4965_ops,
2654 .mod_params = &iwlagn_mod_params,
2655 .base_params = &iwl4965_base_params,
Wey-Yi Guy564b3442010-11-09 09:21:34 -08002656 .led_mode = IWL_LED_BLINK,
Johannes Berge7cb4952010-04-13 01:04:35 -07002657 /*
2658 * Force use of chains B and C for scan RX on 5 GHz band
2659 * because the device has off-channel reception on chain A.
2660 */
Johannes Berg0e1654f2010-05-18 02:48:36 -07002661 .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
Tomas Winkler82b9a122008-03-04 18:09:30 -08002662};
2663
Tomas Winklerd16dc482008-07-11 11:53:38 +08002664/* Module firmware */
Reinette Chatrea0987a82008-12-02 12:14:06 -08002665MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
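/*
 * MODULE_FIRMWARE() only records the firmware file name in the module
 * metadata so userspace tooling (e.g. initramfs generators) knows which
 * image to ship; the actual request is made at load time, where the
 * driver asks for the highest supported uCode API version first and
 * falls back toward the minimum.
 */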
Tomas Winklerd16dc482008-07-11 11:53:38 +08002666