blob: a10a6e81769f735698c4195ad2d8c33c9254cebf [file] [log] [blame]
Zhu Yib481de92007-09-25 17:54:57 -07001/******************************************************************************
2 *
Reinette Chatreeb7ae892008-03-11 16:17:17 -07003 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
Zhu Yib481de92007-09-25 17:54:57 -07004 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/version.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/wireless.h>
37#include <net/mac80211.h>
Zhu Yib481de92007-09-25 17:54:57 -070038#include <linux/etherdevice.h>
Zhu Yi12342c42007-12-20 11:27:32 +080039#include <asm/unaligned.h>
Zhu Yib481de92007-09-25 17:54:57 -070040
Assaf Krauss6bc913b2008-03-11 16:17:18 -070041#include "iwl-eeprom.h"
Zhu Yib481de92007-09-25 17:54:57 -070042#include "iwl-4965.h"
Tomas Winklerfee12472008-04-03 16:05:21 -070043#include "iwl-core.h"
Tomas Winkler3395f6e2008-03-25 16:33:37 -070044#include "iwl-io.h"
Zhu Yib481de92007-09-25 17:54:57 -070045#include "iwl-helpers.h"
46
/* module parameters */
static struct iwl_mod_params iwl4965_mod_params = {
	.num_of_queues = IWL_MAX_NUM_QUEUES,	/* use max Tx queues by default */
	.enable_qos = 1,			/* QoS (WMM) on by default */
	.amsdu_size_8K = 1,	/* selects 8K Rx buffers in iwl4965_rx_init() */
	/* the rest are 0 by default */
};
54
Tomas Winklerc79dd5b2008-03-12 16:58:50 -070055static void iwl4965_hw_card_show_info(struct iwl_priv *priv);
Christoph Hellwig416e1432007-10-25 17:15:49 +080056
/*
 * Build one entry of iwl4965_rates[] (below), keyed by rate index.
 * Fields, in positional order: legacy PLCP, SISO HT PLCP, MIMO HT PLCP,
 * IEEE rate value, then the previous/next rate indices described by the
 * "Parameter order" comment that follows the macro.  INV arguments map
 * to IWL_RATE_INVALID.
 */
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
				    IWL_RATE_SISO_##s##M_PLCP, \
				    IWL_RATE_MIMO_##s##M_PLCP, \
				    IWL_RATE_##r##M_IEEE,      \
				    IWL_RATE_##ip##M_INDEX,    \
				    IWL_RATE_##in##M_INDEX,    \
				    IWL_RATE_##rp##M_INDEX,    \
				    IWL_RATE_##rn##M_INDEX,    \
				    IWL_RATE_##pp##M_INDEX,    \
				    IWL_RATE_##np##M_INDEX }
68
69/*
70 * Parameter order:
71 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
72 *
73 * If there isn't a valid next or previous rate then INV is used which
74 * maps to IWL_RATE_INVALID
75 *
76 */
/*
 * Rate table, one entry per supported rate.  Each entry is placed at
 * its IWL_RATE_*M_INDEX slot by the designated initializer in
 * IWL_DECLARE_RATE_INFO, so entry order here must stay consistent with
 * those index constants (iwl4965_hwrate_to_plcp_idx() searches .plcp
 * by index).
 */
const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /* 1mbps */
	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /* 2mbps */
	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /* 6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /* 9mbps */
	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
};
92
#ifdef CONFIG_IWL4965_HT

/*
 * Default mapping from TID (traffic identifier) to Tx DMA FIFO.
 * Indexed positionally by TID; the table has 17 entries.
 * NOTE(review): the extra 17th entry (index 16) maps to AC3 --
 * presumably for non-QoS/management traffic; confirm against the
 * users of this table.
 */
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,	/* TID 0 */
	IWL_TX_FIFO_AC0,	/* TID 1 */
	IWL_TX_FIFO_AC0,	/* TID 2 */
	IWL_TX_FIFO_AC1,	/* TID 3 */
	IWL_TX_FIFO_AC2,	/* TID 4 */
	IWL_TX_FIFO_AC2,	/* TID 5 */
	IWL_TX_FIFO_AC3,	/* TID 6 */
	IWL_TX_FIFO_AC3,	/* TID 7 */
	IWL_TX_FIFO_NONE,	/* TID 8 */
	IWL_TX_FIFO_NONE,	/* TID 9 */
	IWL_TX_FIFO_NONE,	/* TID 10 */
	IWL_TX_FIFO_NONE,	/* TID 11 */
	IWL_TX_FIFO_NONE,	/* TID 12 */
	IWL_TX_FIFO_NONE,	/* TID 13 */
	IWL_TX_FIFO_NONE,	/* TID 14 */
	IWL_TX_FIFO_NONE,	/* TID 15 */
	IWL_TX_FIFO_AC3		/* TID 16 */
};

#endif /*CONFIG_IWL4965_HT */
116
Tomas Winkler57aab752008-04-14 21:16:03 -0700117/* check contents of special bootstrap uCode SRAM */
118static int iwl4965_verify_bsm(struct iwl_priv *priv)
119{
120 __le32 *image = priv->ucode_boot.v_addr;
121 u32 len = priv->ucode_boot.len;
122 u32 reg;
123 u32 val;
124
125 IWL_DEBUG_INFO("Begin verify bsm\n");
126
127 /* verify BSM SRAM contents */
128 val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
129 for (reg = BSM_SRAM_LOWER_BOUND;
130 reg < BSM_SRAM_LOWER_BOUND + len;
131 reg += sizeof(u32), image++) {
132 val = iwl_read_prph(priv, reg);
133 if (val != le32_to_cpu(*image)) {
134 IWL_ERROR("BSM uCode verification failed at "
135 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
136 BSM_SRAM_LOWER_BOUND,
137 reg - BSM_SRAM_LOWER_BOUND, len,
138 val, le32_to_cpu(*image));
139 return -EIO;
140 }
141 }
142
143 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
144
145 return 0;
146}
147
148/**
149 * iwl4965_load_bsm - Load bootstrap instructions
150 *
151 * BSM operation:
152 *
153 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
154 * in special SRAM that does not power down during RFKILL. When powering back
155 * up after power-saving sleeps (or during initial uCode load), the BSM loads
156 * the bootstrap program into the on-board processor, and starts it.
157 *
158 * The bootstrap program loads (via DMA) instructions and data for a new
159 * program from host DRAM locations indicated by the host driver in the
160 * BSM_DRAM_* registers. Once the new program is loaded, it starts
161 * automatically.
162 *
163 * When initializing the NIC, the host driver points the BSM to the
164 * "initialize" uCode image. This uCode sets up some internal data, then
165 * notifies host via "initialize alive" that it is complete.
166 *
167 * The host then replaces the BSM_DRAM_* pointer values to point to the
168 * normal runtime uCode instructions and a backup uCode data cache buffer
169 * (filled initially with starting data values for the on-board processor),
170 * then triggers the "initialize" uCode to load and launch the runtime uCode,
171 * which begins normal operation.
172 *
173 * When doing a power-save shutdown, runtime uCode saves data SRAM into
174 * the backup data cache in DRAM before SRAM is powered down.
175 *
176 * When powering back up, the BSM loads the bootstrap program. This reloads
177 * the runtime uCode instructions and the backup data cache into SRAM,
178 * and re-launches the runtime uCode from where it left off.
179 */
180static int iwl4965_load_bsm(struct iwl_priv *priv)
181{
182 __le32 *image = priv->ucode_boot.v_addr;
183 u32 len = priv->ucode_boot.len;
184 dma_addr_t pinst;
185 dma_addr_t pdata;
186 u32 inst_len;
187 u32 data_len;
188 int i;
189 u32 done;
190 u32 reg_offset;
191 int ret;
192
193 IWL_DEBUG_INFO("Begin load bsm\n");
194
195 /* make sure bootstrap program is no larger than BSM's SRAM size */
196 if (len > IWL_MAX_BSM_SIZE)
197 return -EINVAL;
198
199 /* Tell bootstrap uCode where to find the "Initialize" uCode
200 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
201 * NOTE: iwl4965_initialize_alive_start() will replace these values,
202 * after the "initialize" uCode has run, to point to
203 * runtime/protocol instructions and backup data cache. */
204 pinst = priv->ucode_init.p_addr >> 4;
205 pdata = priv->ucode_init_data.p_addr >> 4;
206 inst_len = priv->ucode_init.len;
207 data_len = priv->ucode_init_data.len;
208
209 ret = iwl_grab_nic_access(priv);
210 if (ret)
211 return ret;
212
213 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
214 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
215 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
216 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
217
218 /* Fill BSM memory with bootstrap instructions */
219 for (reg_offset = BSM_SRAM_LOWER_BOUND;
220 reg_offset < BSM_SRAM_LOWER_BOUND + len;
221 reg_offset += sizeof(u32), image++)
222 _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));
223
224 ret = iwl4965_verify_bsm(priv);
225 if (ret) {
226 iwl_release_nic_access(priv);
227 return ret;
228 }
229
230 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
231 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
232 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
233 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
234
235 /* Load bootstrap code into instruction SRAM now,
236 * to prepare to load "initialize" uCode */
237 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
238
239 /* Wait for load of bootstrap uCode to finish */
240 for (i = 0; i < 100; i++) {
241 done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
242 if (!(done & BSM_WR_CTRL_REG_BIT_START))
243 break;
244 udelay(10);
245 }
246 if (i < 100)
247 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
248 else {
249 IWL_ERROR("BSM write did not complete!\n");
250 return -EIO;
251 }
252
253 /* Enable future boot loads whenever power management unit triggers it
254 * (e.g. when powering back up after power-save shutdown) */
255 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
256
257 iwl_release_nic_access(priv);
258
259 return 0;
260}
261
/*
 * iwl4965_init_drv - one-time driver-level initialization.
 *
 * Initializes locks, lists, default configuration and the DMA-coherent
 * "shared" area, builds the channel map and geo tables, then registers
 * the device with mac80211.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): priv->shared_virt allocated below is not freed on the
 * later error paths (err_free_geos/err_free_channel_map/err) -- TODO
 * confirm the caller's unwind releases it.
 */
static int iwl4965_init_drv(struct iwl_priv *priv)
{
	int ret;
	int i;

	priv->antenna = (enum iwl4965_antenna)priv->cfg->mod_params->antenna;
	priv->retry_rate = 1;
	priv->ibss_beacon = NULL;

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->power_data.lock);
	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);
	spin_lock_init(&priv->lq_mngr.lock);

	/* DMA-coherent area shared with the device (see iwl4965_rx_init,
	 * which points the HW at shared_phys + rb_closed) */
	priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
					sizeof(struct iwl4965_shared),
					&priv->shared_phys);

	if (!priv->shared_virt) {
		ret = -ENOMEM;
		goto err;
	}

	memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));


	for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);

	/* Clear the driver's (not device's) station table */
	iwlcore_clear_stations_table(priv);

	priv->data_retry_limit = -1;	/* -1 = use default retry limit */
	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = IEEE80211_IF_TYPE_STA;

	priv->use_ant_b_for_management_frame = 1; /* start with ant B */
	priv->valid_antenna = 0x7;	/* assume all 3 connected */
	priv->ps_mode = IWL_MIMO_PS_NONE;

	/* Choose which receivers/antennas to use */
	iwl4965_set_rxon_chain(priv);

	iwlcore_reset_qos(priv);

	priv->qos_data.qos_active = 0;
	priv->qos_data.qos_cap.val = 0;

	iwlcore_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);

	priv->rates_mask = IWL_RATES_MASK;
	/* If power management is turned on, default to AC mode */
	priv->power_mode = IWL_POWER_AC;
	priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;

	ret = iwl_init_channel_map(priv);
	if (ret) {
		IWL_ERROR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = iwl4965_init_geos(priv);
	if (ret) {
		IWL_ERROR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERROR("Failed to register network device (error %d)\n",
				ret);
		goto err_free_geos;
	}

	priv->hw->conf.beacon_int = 100;
	priv->mac80211_registered = 1;

	return 0;

err_free_geos:
	iwl4965_free_geos(priv);
err_free_channel_map:
	iwl_free_channel_map(priv);
err:
	return ret;
}
356
Zhu Yib481de92007-09-25 17:54:57 -0700357static int is_fat_channel(__le32 rxon_flags)
358{
359 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
360 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
361}
362
/* Return 1 when only a single Rx/Tx stream should be used, 0 when
 * MIMO (multiple streams) is possible. */
static u8 is_single_stream(struct iwl_priv *priv)
{
#ifdef CONFIG_IWL4965_HT
	/* Single stream if HT is off, the peer advertises no
	 * second-stream MCS rates, or static MIMO power-save is on */
	if (!priv->current_ht_config.is_ht ||
	    (priv->current_ht_config.supp_mcs_set[1] == 0) ||
	    (priv->ps_mode == IWL_MIMO_PS_STATIC))
		return 1;
#else
	/* without HT support there is never a second stream */
	return 1;
#endif	/*CONFIG_IWL4965_HT */
	return 0;
}
375
Tomas Winkler17744ff2008-03-02 01:52:00 +0200376int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
377{
378 int idx = 0;
379
380 /* 4965 HT rate format */
381 if (rate_n_flags & RATE_MCS_HT_MSK) {
382 idx = (rate_n_flags & 0xff);
383
384 if (idx >= IWL_RATE_MIMO_6M_PLCP)
385 idx = idx - IWL_RATE_MIMO_6M_PLCP;
386
387 idx += IWL_FIRST_OFDM_RATE;
388 /* skip 9M not supported in ht*/
389 if (idx >= IWL_RATE_9M_INDEX)
390 idx += 1;
391 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
392 return idx;
393
394 /* 4965 legacy rate format, search for match in table */
395 } else {
396 for (idx = 0; idx < ARRAY_SIZE(iwl4965_rates); idx++)
397 if (iwl4965_rates[idx].plcp == (rate_n_flags & 0xFF))
398 return idx;
399 }
400
401 return -1;
402}
403
Ron Rindjunsky4c424e42008-03-04 18:09:27 -0800404/**
405 * translate ucode response to mac80211 tx status control values
406 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700407void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
Ron Rindjunsky4c424e42008-03-04 18:09:27 -0800408 struct ieee80211_tx_control *control)
409{
410 int rate_index;
411
412 control->antenna_sel_tx =
413 ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_A_POS);
414 if (rate_n_flags & RATE_MCS_HT_MSK)
415 control->flags |= IEEE80211_TXCTL_OFDM_HT;
416 if (rate_n_flags & RATE_MCS_GF_MSK)
417 control->flags |= IEEE80211_TXCTL_GREEN_FIELD;
418 if (rate_n_flags & RATE_MCS_FAT_MSK)
419 control->flags |= IEEE80211_TXCTL_40_MHZ_WIDTH;
420 if (rate_n_flags & RATE_MCS_DUP_MSK)
421 control->flags |= IEEE80211_TXCTL_DUP_DATA;
422 if (rate_n_flags & RATE_MCS_SGI_MSK)
423 control->flags |= IEEE80211_TXCTL_SHORT_GI;
424 /* since iwl4965_hwrate_to_plcp_idx is band indifferent, we always use
425 * IEEE80211_BAND_2GHZ band as it contains all the rates */
426 rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
427 if (rate_index == -1)
428 control->tx_rate = NULL;
429 else
430 control->tx_rate =
431 &priv->bands[IEEE80211_BAND_2GHZ].bitrates[rate_index];
432}
Tomas Winkler17744ff2008-03-02 01:52:00 +0200433
Zhu Yib481de92007-09-25 17:54:57 -0700434/*
435 * Determine how many receiver/antenna chains to use.
436 * More provides better reception via diversity. Fewer saves power.
437 * MIMO (dual stream) requires at least 2, but works better with 3.
438 * This does not determine *which* chains to use, just how many.
439 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700440static int iwl4965_get_rx_chain_counter(struct iwl_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -0700441 u8 *idle_state, u8 *rx_state)
442{
443 u8 is_single = is_single_stream(priv);
444 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;
445
446 /* # of Rx chains to use when expecting MIMO. */
447 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
448 *rx_state = 2;
449 else
450 *rx_state = 3;
451
452 /* # Rx chains when idling and maybe trying to save power */
453 switch (priv->ps_mode) {
454 case IWL_MIMO_PS_STATIC:
455 case IWL_MIMO_PS_DYNAMIC:
456 *idle_state = (is_cam) ? 2 : 1;
457 break;
458 case IWL_MIMO_PS_NONE:
459 *idle_state = (is_cam) ? *rx_state : 1;
460 break;
461 default:
462 *idle_state = 1;
463 break;
464 }
465
466 return 0;
467}
468
/*
 * iwl4965_hw_rxq_stop - Stop the Rx DMA channel.
 *
 * Disables CHNL0 Rx DMA and polls (up to 1000 us) for the channel to
 * go idle.  NOTE: a poll timeout is only logged; the function still
 * returns 0.  Only a failure to grab NIC access is propagated.
 */
int iwl4965_hw_rxq_stop(struct iwl_priv *priv)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* (1 << 24) is presumably the CHNL0 idle bit of the Rx status
	 * register -- TODO confirm against the FH register definitions */
	rc = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				 (1 << 24), 1000);
	if (rc < 0)
		IWL_ERROR("Can't stop Rx DMA.\n");

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
493
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700494u8 iwl4965_hw_find_station(struct iwl_priv *priv, const u8 *addr)
Zhu Yib481de92007-09-25 17:54:57 -0700495{
496 int i;
497 int start = 0;
498 int ret = IWL_INVALID_STATION;
499 unsigned long flags;
Joe Perches0795af52007-10-03 17:59:30 -0700500 DECLARE_MAC_BUF(mac);
Zhu Yib481de92007-09-25 17:54:57 -0700501
502 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
503 (priv->iw_mode == IEEE80211_IF_TYPE_AP))
504 start = IWL_STA_ID;
505
506 if (is_broadcast_ether_addr(addr))
Tomas Winklera4062b82008-03-11 16:17:16 -0700507 return priv->hw_setting.bcast_sta_id;
Zhu Yib481de92007-09-25 17:54:57 -0700508
509 spin_lock_irqsave(&priv->sta_lock, flags);
510 for (i = start; i < priv->hw_setting.max_stations; i++)
511 if ((priv->stations[i].used) &&
512 (!compare_ether_addr
513 (priv->stations[i].sta.sta.addr, addr))) {
514 ret = i;
515 goto out;
516 }
517
John W. Linvillea50e2e32007-09-27 17:00:29 -0400518 IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n",
Joe Perches0795af52007-10-03 17:59:30 -0700519 print_mac(mac, addr), priv->num_stations);
Zhu Yib481de92007-09-25 17:54:57 -0700520
521 out:
522 spin_unlock_irqrestore(&priv->sta_lock, flags);
523 return ret;
524}
525
/*
 * iwl4965_nic_set_pwr_src - select the NIC power source.
 *
 * @pwr_max: nonzero selects main power (VMAIN).  Zero selects
 *           auxiliary power (VAUX), but only if PCI config space
 *           reports PME-from-D3cold support.
 */
static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	if (!pwr_max) {
		u32 val;

		/* NOTE(review): ret is reassigned here, so the PCI config
		 * read result becomes this function's return value */
		ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
					    &val);

		if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					~APMG_PS_CTRL_MSK_PWR_SRC);
	} else
		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				~APMG_PS_CTRL_MSK_PWR_SRC);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}
558
/*
 * iwl4965_rx_init - program the Rx DMA channel.
 *
 * Stops Rx DMA, points the device at the RBD circular buffer and the
 * shared Rx-status area, then re-enables the channel with the chosen
 * Rx buffer size.
 */
static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
{
	int ret;
	unsigned long flags;
	unsigned int rb_size;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* 8K Rx buffers when large A-MSDUs are enabled, else 4K */
	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   rxq->dma_addr >> 8);

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   (priv->shared_phys +
			    offsetof(struct iwl4965_shared, rb_closed)) >> 4);

	/* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			     /* 0x10 << 4 | */
			   (RX_QUEUE_SIZE_LOG <<
			      FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));

	/*
	 * iwl_write32(priv,CSR_INT_COAL_REG,0);
	 */

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
610
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800611/* Tell 4965 where to find the "keep warm" buffer */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700612static int iwl4965_kw_init(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700613{
614 unsigned long flags;
615 int rc;
616
617 spin_lock_irqsave(&priv->lock, flags);
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700618 rc = iwl_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700619 if (rc)
620 goto out;
621
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700622 iwl_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
Zhu Yib481de92007-09-25 17:54:57 -0700623 priv->kw.dma_addr >> 4);
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700624 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700625out:
626 spin_unlock_irqrestore(&priv->lock, flags);
627 return rc;
628}
629
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700630static int iwl4965_kw_alloc(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700631{
632 struct pci_dev *dev = priv->pci_dev;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800633 struct iwl4965_kw *kw = &priv->kw;
Zhu Yib481de92007-09-25 17:54:57 -0700634
635 kw->size = IWL4965_KW_SIZE; /* TBW need set somewhere else */
636 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
637 if (!kw->v_addr)
638 return -ENOMEM;
639
640 return 0;
641}
642
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800643/**
644 * iwl4965_kw_free - Free the "keep warm" buffer
645 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700646static void iwl4965_kw_free(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700647{
648 struct pci_dev *dev = priv->pci_dev;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800649 struct iwl4965_kw *kw = &priv->kw;
Zhu Yib481de92007-09-25 17:54:57 -0700650
651 if (kw->v_addr) {
652 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
653 memset(kw, 0, sizeof(*kw));
654 }
655}
656
/**
 * iwl4965_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initialises them again
 *
 * @param priv
 * @return error code
 */
static int iwl4965_txq_ctx_reset(struct iwl_priv *priv)
{
	int rc = 0;
	int txq_id, slots_num;
	unsigned long flags;

	/* Release any previous "keep warm" buffer before reallocating */
	iwl4965_kw_free(priv);

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl4965_hw_txq_ctx_free(priv);

	/* Alloc keep-warm buffer */
	rc = iwl4965_kw_alloc(priv);
	if (rc) {
		IWL_ERROR("Keep Warm allocation failed");
		goto error_kw;
	}

	spin_lock_irqsave(&priv->lock, flags);

	rc = iwl_grab_nic_access(priv);
	if (unlikely(rc)) {
		IWL_ERROR("TX reset failed");
		spin_unlock_irqrestore(&priv->lock, flags);
		goto error_reset;
	}

	/* Turn off all Tx DMA channels */
	iwl_write_prph(priv, IWL49_SCD_TXFACT, 0);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Tell 4965 where to find the keep-warm buffer */
	rc = iwl4965_kw_init(priv);
	if (rc) {
		IWL_ERROR("kw_init failed\n");
		goto error_reset;
	}

	/* Alloc and init all (default 16) Tx queues,
	 * including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
		/* command queue uses a different slot count */
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					   txq_id);
		if (rc) {
			IWL_ERROR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return rc;

	/* Unwind in reverse order of acquisition */
 error:
	iwl4965_hw_txq_ctx_free(priv);
 error_reset:
	iwl4965_kw_free(priv);
 error_kw:
	return rc;
}
725
/*
 * iwl4965_hw_nic_init - bring the NIC out of reset and prepare it.
 *
 * Sequence: clock/power bring-up via CSR and APMG registers, PCI
 * quirk handling, EEPROM version check, Rx queue allocation and DMA
 * setup, then Tx/command queue context reset.  Sets STATUS_INIT on
 * success and returns 0, or a negative errno on failure.
 */
int iwl4965_hw_nic_init(struct iwl_priv *priv)
{
	int rc;
	unsigned long flags;
	struct iwl4965_rx_queue *rxq = &priv->rxq;
	u8 rev_id;
	u32 val;
	u8 val_link;

	iwl4965_power_init_handle(priv);

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* request MAC clock and wait (up to 25 ms) for it to stabilize */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (rc < 0) {
		spin_unlock_irqrestore(&priv->lock, flags);
		IWL_DEBUG_INFO("Failed to init the card\n");
		return rc;
	}

	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	iwl_read_prph(priv, APMG_CLK_CTRL_REG);

	/* enable DMA and BSM clock requests */
	iwl_write_prph(priv, APMG_CLK_CTRL_REG,
		       APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	iwl_read_prph(priv, APMG_CLK_CTRL_REG);

	udelay(20);

	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);
	iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Determine HW type */
	rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
	if (rc)
		return rc;

	IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id);

	/* NOTE(review): return value is ignored here -- TODO confirm
	 * a power-source failure is safe to ignore at this point */
	iwl4965_nic_set_pwr_src(priv, 1);
	spin_lock_irqsave(&priv->lock, flags);

	if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) {
		pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
		/* Enable No Snoop field */
		pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
				       val & ~(1 << 11));
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) {
		IWL_ERROR("Older EEPROM detected! Aborting.\n");
		return -EINVAL;
	}

	pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);

	/* disable L1 entry -- workaround for pre-B1 */
	pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02);

	spin_lock_irqsave(&priv->lock, flags);

	/* set CSR_HW_CONFIG_REG for uCode use */

	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR49_HW_IF_CONFIG_REG_BIT_4965_R |
		    CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI);

	rc = iwl_grab_nic_access(priv);
	if (rc < 0) {
		spin_unlock_irqrestore(&priv->lock, flags);
		IWL_DEBUG_INFO("Failed to init the card\n");
		return rc;
	}

	/* pulse the power-supply reset request */
	iwl_read_prph(priv, APMG_PS_CTRL_REG);
	iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
	udelay(5);
	iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl4965_hw_card_show_info(priv);

	/* end nic_init */

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		rc = iwl4965_rx_queue_alloc(priv);
		if (rc) {
			IWL_ERROR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl4965_rx_queue_reset(priv, rxq);

	iwl4965_rx_replenish(priv);

	iwl4965_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl4965_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate and init all Tx and Command queues */
	rc = iwl4965_txq_ctx_reset(priv);
	if (rc)
		return rc;

	if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n");

	if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
867
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700868int iwl4965_hw_nic_stop_master(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700869{
870 int rc = 0;
871 u32 reg_val;
872 unsigned long flags;
873
874 spin_lock_irqsave(&priv->lock, flags);
875
876 /* set stop master bit */
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700877 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
Zhu Yib481de92007-09-25 17:54:57 -0700878
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700879 reg_val = iwl_read32(priv, CSR_GP_CNTRL);
Zhu Yib481de92007-09-25 17:54:57 -0700880
881 if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
882 (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
883 IWL_DEBUG_INFO("Card in power save, master is already "
884 "stopped\n");
885 else {
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700886 rc = iwl_poll_bit(priv, CSR_RESET,
Zhu Yib481de92007-09-25 17:54:57 -0700887 CSR_RESET_REG_FLAG_MASTER_DISABLED,
888 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
889 if (rc < 0) {
890 spin_unlock_irqrestore(&priv->lock, flags);
891 return rc;
892 }
893 }
894
895 spin_unlock_irqrestore(&priv->lock, flags);
896 IWL_DEBUG_INFO("stop master\n");
897
898 return rc;
899}
900
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800901/**
902 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
903 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700904void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700905{
906
907 int txq_id;
908 unsigned long flags;
909
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800910 /* Stop each Tx DMA channel, and wait for it to be idle */
Zhu Yib481de92007-09-25 17:54:57 -0700911 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) {
912 spin_lock_irqsave(&priv->lock, flags);
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700913 if (iwl_grab_nic_access(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -0700914 spin_unlock_irqrestore(&priv->lock, flags);
915 continue;
916 }
917
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700918 iwl_write_direct32(priv,
919 IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
920 iwl_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
921 IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
922 (txq_id), 200);
923 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700924 spin_unlock_irqrestore(&priv->lock, flags);
925 }
926
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800927 /* Deallocate memory for all Tx queues */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800928 iwl4965_hw_txq_ctx_free(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700929}
930
/**
 * iwl4965_hw_nic_reset - Soft-reset the device and restore basic clocks
 *
 * Stops bus-master DMA, asserts a software reset, waits for the MAC
 * clock to become ready, then re-enables the DMA and BSM clocks and
 * disables PCIe L1-Active.  Any host command that was in flight is lost
 * across the reset, so STATUS_HCMD_ACTIVE is cleared and waiters woken.
 *
 * Returns the iwl_grab_nic_access() result (0 on success).
 */
int iwl4965_hw_nic_reset(struct iwl_priv *priv)
{
	int rc = 0;
	unsigned long flags;

	/* Quiesce bus-master DMA before asserting reset */
	iwl4965_hw_nic_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	/* Assert software reset of the whole device */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* Request init-done, then wait for MAC clock ready.
	 * NOTE(review): this polls CSR_RESET using CSR_GP_CNTRL flag
	 * values, and the result is overwritten by iwl_grab_nic_access()
	 * below, so a clock-ready timeout is silently ignored -- confirm
	 * whether both are intentional. */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	rc = iwl_poll_bit(priv, CSR_RESET,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);

	udelay(10);

	rc = iwl_grab_nic_access(priv);
	if (!rc) {
		/* Re-enable DMA and BSM (bootstrap) clock requests */
		iwl_write_prph(priv, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT |
			       APMG_CLK_VAL_BSM_CLK_RQT);

		udelay(10);

		/* Keep PCIe L1-Active disabled after the reset */
		iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		iwl_release_nic_access(priv);
	}

	/* Reset dropped any in-flight host command; release waiters */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
	wake_up_interruptible(&priv->wait_command_queue);

	spin_unlock_irqrestore(&priv->lock, flags);

	return rc;

}
973
974#define REG_RECALIB_PERIOD (60)
975
976/**
977 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
978 *
979 * This callback is provided in order to queue the statistics_work
980 * in work_queue context (v. softirq)
981 *
982 * This timer function is continually reset to execute within
983 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
984 * was received. We need to ensure we receive the statistics in order
985 * to update the temperature used for calibrating the TXPOWER. However,
986 * we can't send the statistics command from softirq context (which
987 * is the context which timers run at) so we have to queue off the
988 * statistics_work to actually send the command to the hardware.
989 */
990static void iwl4965_bg_statistics_periodic(unsigned long data)
991{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700992 struct iwl_priv *priv = (struct iwl_priv *)data;
Zhu Yib481de92007-09-25 17:54:57 -0700993
994 queue_work(priv->workqueue, &priv->statistics_work);
995}
996
997/**
998 * iwl4965_bg_statistics_work - Send the statistics request to the hardware.
999 *
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001000 * This is queued by iwl4965_bg_statistics_periodic.
Zhu Yib481de92007-09-25 17:54:57 -07001001 */
1002static void iwl4965_bg_statistics_work(struct work_struct *work)
1003{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001004 struct iwl_priv *priv = container_of(work, struct iwl_priv,
Zhu Yib481de92007-09-25 17:54:57 -07001005 statistics_work);
1006
1007 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1008 return;
1009
1010 mutex_lock(&priv->mutex);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001011 iwl4965_send_statistics_request(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001012 mutex_unlock(&priv->mutex);
1013}
1014
1015#define CT_LIMIT_CONST 259
1016#define TM_CT_KILL_THRESHOLD 110
1017
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001018void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001019{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001020 struct iwl4965_ct_kill_config cmd;
Zhu Yib481de92007-09-25 17:54:57 -07001021 u32 R1, R2, R3;
1022 u32 temp_th;
1023 u32 crit_temperature;
1024 unsigned long flags;
Tomas Winkler857485c2008-03-21 13:53:44 -07001025 int ret = 0;
Zhu Yib481de92007-09-25 17:54:57 -07001026
1027 spin_lock_irqsave(&priv->lock, flags);
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001028 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
Zhu Yib481de92007-09-25 17:54:57 -07001029 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1030 spin_unlock_irqrestore(&priv->lock, flags);
1031
1032 if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) {
1033 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1034 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1035 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1036 } else {
1037 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1038 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1039 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1040 }
1041
1042 temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD);
1043
1044 crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2;
1045 cmd.critical_temperature_R = cpu_to_le32(crit_temperature);
Tomas Winkler857485c2008-03-21 13:53:44 -07001046 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1047 sizeof(cmd), &cmd);
1048 if (ret)
Zhu Yib481de92007-09-25 17:54:57 -07001049 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
1050 else
1051 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n");
1052}
1053
#ifdef CONFIG_IWL4965_SENSITIVITY

/* "false alarms" are signals that our DSP tries to lock onto,
 * but then determines that they are either noise, or transmissions
 * from a distant wireless network (also "noise", really) that get
 * "stepped on" by stronger transmissions within our own network.
 * This algorithm attempts to set a sensitivity level that is high
 * enough to receive all of our own network traffic, but not so
 * high that our DSP gets too busy trying to lock onto non-network
 * activity/noise.
 *
 * Adjusts the CCK energy threshold (nrg_th_cck, where a LOWER value
 * means HIGHER energy, hence the seemingly inverted min/max below)
 * and the CCK auto-correlation thresholds in priv->sensitivity_data,
 * based on the false-alarm counts reported in beacon statistics.
 * Always returns 0. */
static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
				   u32 norm_fa,
				   u32 rx_enable_time,
				   struct statistics_general_data *rx_info)
{
	u32 max_nrg_cck = 0;
	int i = 0;
	u8 max_silence_rssi = 0;
	u32 silence_ref = 0;
	u8 silence_rssi_a = 0;
	u8 silence_rssi_b = 0;
	u8 silence_rssi_c = 0;
	u32 val;

	/* "false_alarms" values below are cross-multiplications to assess the
	 * numbers of false alarms within the measured period of actual Rx
	 * (Rx is off when we're txing), vs the min/max expected false alarms
	 * (some should be expected if rx is sensitive enough) in a
	 * hypothetical listening period of 200 time units (TU), 204.8 msec:
	 *
	 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
	 *
	 * */
	u32 false_alarms = norm_fa * 200 * 1024;
	u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
	u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
	struct iwl4965_sensitivity_data *data = NULL;

	data = &(priv->sensitivity_data);

	data->nrg_auto_corr_silence_diff = 0;

	/* Find max silence rssi among all 3 receivers.
	 * This is background noise, which may include transmissions from other
	 * networks, measured during silence before our network's beacon */
	silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
			ALL_BAND_FILTER) >> 8);
	silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
			ALL_BAND_FILTER) >> 8);
	silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
			ALL_BAND_FILTER) >> 8);

	val = max(silence_rssi_b, silence_rssi_c);
	max_silence_rssi = max(silence_rssi_a, (u8) val);

	/* Store silence rssi in 20-beacon history table */
	data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
	data->nrg_silence_idx++;
	if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
		data->nrg_silence_idx = 0;

	/* Find max silence rssi across 20 beacon history */
	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
		val = data->nrg_silence_rssi[i];
		silence_ref = max(silence_ref, val);
	}
	IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n",
			silence_rssi_a, silence_rssi_b, silence_rssi_c,
			silence_ref);

	/* Find max rx energy (min value!) among all 3 receivers,
	 * measured during beacon frame.
	 * Save it in 10-beacon history table. */
	i = data->nrg_energy_idx;
	val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
	data->nrg_value[i] = min(rx_info->beacon_energy_a, val);

	data->nrg_energy_idx++;
	if (data->nrg_energy_idx >= 10)
		data->nrg_energy_idx = 0;

	/* Find min rx energy (max value) across 10 beacon history.
	 * This is the minimum signal level that we want to receive well.
	 * Add backoff (margin so we don't miss slightly lower energy frames).
	 * This establishes an upper bound (min value) for energy threshold. */
	max_nrg_cck = data->nrg_value[0];
	for (i = 1; i < 10; i++)
		max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
	max_nrg_cck += 6;

	IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
			rx_info->beacon_energy_a, rx_info->beacon_energy_b,
			rx_info->beacon_energy_c, max_nrg_cck - 6);

	/* Count number of consecutive beacons with fewer-than-desired
	 * false alarms. */
	if (false_alarms < min_false_alarms)
		data->num_in_cck_no_fa++;
	else
		data->num_in_cck_no_fa = 0;
	IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n",
			data->num_in_cck_no_fa);

	/* If we got too many false alarms this time, reduce sensitivity */
	if (false_alarms > max_false_alarms) {
		IWL_DEBUG_CALIB("norm FA %u > max FA %u\n",
			     false_alarms, max_false_alarms);
		IWL_DEBUG_CALIB("... reducing sensitivity\n");
		data->nrg_curr_state = IWL_FA_TOO_MANY;

		if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
			/* Store for "fewer than desired" on later beacon */
			data->nrg_silence_ref = silence_ref;

			/* increase energy threshold (reduce nrg value)
			 * to decrease sensitivity */
			if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK))
				data->nrg_th_cck = data->nrg_th_cck
							 - NRG_STEP_CCK;
		}

		/* increase auto_corr values to decrease sensitivity */
		if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
			data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
		else {
			val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
			data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val);
		}
		val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
		data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val);

	/* Else if we got fewer than desired, increase sensitivity */
	} else if (false_alarms < min_false_alarms) {
		data->nrg_curr_state = IWL_FA_TOO_FEW;

		/* Compare silence level with silence level for most recent
		 * healthy number or too many false alarms */
		data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
						   (s32)silence_ref;

		IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n",
			 false_alarms, min_false_alarms,
			 data->nrg_auto_corr_silence_diff);

		/* Increase value to increase sensitivity, but only if:
		 * 1a) previous beacon did *not* have *too many* false alarms
		 * 1b) AND there's a significant difference in Rx levels
		 *      from a previous beacon with too many, or healthy # FAs
		 * OR 2) We've seen a lot of beacons (100) with too few
		 *       false alarms */
		if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
			((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
			(data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {

			IWL_DEBUG_CALIB("... increasing sensitivity\n");
			/* Increase nrg value to increase sensitivity */
			val = data->nrg_th_cck + NRG_STEP_CCK;
			data->nrg_th_cck = min((u32)NRG_MIN_CCK, val);

			/* Decrease auto_corr values to increase sensitivity */
			val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
			data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val);

			val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
			data->auto_corr_cck_mrc =
					 max((u32)AUTO_CORR_MIN_CCK_MRC, val);

		} else
			IWL_DEBUG_CALIB("... but not changing sensitivity\n");

	/* Else we got a healthy number of false alarms, keep status quo */
	} else {
		IWL_DEBUG_CALIB(" FA in safe zone\n");
		data->nrg_curr_state = IWL_FA_GOOD_RANGE;

		/* Store for use in "fewer than desired" with later beacon */
		data->nrg_silence_ref = silence_ref;

		/* If previous beacon had too many false alarms,
		 * give it some extra margin by reducing sensitivity again
		 * (but don't go below measured energy of desired Rx) */
		if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
			IWL_DEBUG_CALIB("... increasing margin\n");
			data->nrg_th_cck -= NRG_MARGIN;
		}
	}

	/* Make sure the energy threshold does not go above the measured
	 * energy of the desired Rx signals (reduced by backoff margin),
	 * or else we might start missing Rx frames.
	 * Lower value is higher energy, so we use max()!
	 */
	data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
	IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);

	data->nrg_prev_state = data->nrg_curr_state;

	return 0;
}
1253
1254
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001255static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -07001256 u32 norm_fa,
1257 u32 rx_enable_time)
1258{
1259 u32 val;
1260 u32 false_alarms = norm_fa * 200 * 1024;
1261 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
1262 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001263 struct iwl4965_sensitivity_data *data = NULL;
Zhu Yib481de92007-09-25 17:54:57 -07001264
1265 data = &(priv->sensitivity_data);
1266
1267 /* If we got too many false alarms this time, reduce sensitivity */
1268 if (false_alarms > max_false_alarms) {
1269
1270 IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n",
1271 false_alarms, max_false_alarms);
1272
1273 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
1274 data->auto_corr_ofdm =
1275 min((u32)AUTO_CORR_MAX_OFDM, val);
1276
1277 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
1278 data->auto_corr_ofdm_mrc =
1279 min((u32)AUTO_CORR_MAX_OFDM_MRC, val);
1280
1281 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
1282 data->auto_corr_ofdm_x1 =
1283 min((u32)AUTO_CORR_MAX_OFDM_X1, val);
1284
1285 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
1286 data->auto_corr_ofdm_mrc_x1 =
1287 min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val);
1288 }
1289
1290 /* Else if we got fewer than desired, increase sensitivity */
1291 else if (false_alarms < min_false_alarms) {
1292
1293 IWL_DEBUG_CALIB("norm FA %u < min FA %u\n",
1294 false_alarms, min_false_alarms);
1295
1296 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
1297 data->auto_corr_ofdm =
1298 max((u32)AUTO_CORR_MIN_OFDM, val);
1299
1300 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
1301 data->auto_corr_ofdm_mrc =
1302 max((u32)AUTO_CORR_MIN_OFDM_MRC, val);
1303
1304 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
1305 data->auto_corr_ofdm_x1 =
1306 max((u32)AUTO_CORR_MIN_OFDM_X1, val);
1307
1308 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
1309 data->auto_corr_ofdm_mrc_x1 =
1310 max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val);
1311 }
1312
1313 else
1314 IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
1315 min_false_alarms, false_alarms, max_false_alarms);
1316
1317 return 0;
1318}
1319
/* Async completion hook for SENSITIVITY_CMD.  Returning non-zero tells
 * the command path that we did not keep the response SKB, so the caller
 * is responsible for freeing it. */
static int iwl4965_sensitivity_callback(struct iwl_priv *priv,
					struct iwl_cmd *cmd,
					struct sk_buff *skb)
{
	return 1;
}
1326
1327/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001328static int iwl4965_sensitivity_write(struct iwl_priv *priv, u8 flags)
Zhu Yib481de92007-09-25 17:54:57 -07001329{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001330 struct iwl4965_sensitivity_cmd cmd ;
1331 struct iwl4965_sensitivity_data *data = NULL;
Tomas Winkler857485c2008-03-21 13:53:44 -07001332 struct iwl_host_cmd cmd_out = {
Zhu Yib481de92007-09-25 17:54:57 -07001333 .id = SENSITIVITY_CMD,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001334 .len = sizeof(struct iwl4965_sensitivity_cmd),
Zhu Yib481de92007-09-25 17:54:57 -07001335 .meta.flags = flags,
1336 .data = &cmd,
1337 };
Tomas Winkler857485c2008-03-21 13:53:44 -07001338 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001339
1340 data = &(priv->sensitivity_data);
1341
1342 memset(&cmd, 0, sizeof(cmd));
1343
1344 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
1345 cpu_to_le16((u16)data->auto_corr_ofdm);
1346 cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
1347 cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
1348 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
1349 cpu_to_le16((u16)data->auto_corr_ofdm_x1);
1350 cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
1351 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
1352
1353 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
1354 cpu_to_le16((u16)data->auto_corr_cck);
1355 cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
1356 cpu_to_le16((u16)data->auto_corr_cck_mrc);
1357
1358 cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] =
1359 cpu_to_le16((u16)data->nrg_th_cck);
1360 cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] =
1361 cpu_to_le16((u16)data->nrg_th_ofdm);
1362
1363 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
1364 __constant_cpu_to_le16(190);
1365 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
1366 __constant_cpu_to_le16(390);
1367 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
1368 __constant_cpu_to_le16(62);
1369
1370 IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
1371 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
1372 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
1373 data->nrg_th_ofdm);
1374
1375 IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n",
1376 data->auto_corr_cck, data->auto_corr_cck_mrc,
1377 data->nrg_th_cck);
1378
Ben Cahillf7d09d72007-11-29 11:09:51 +08001379 /* Update uCode's "work" table, and copy it to DSP */
Zhu Yib481de92007-09-25 17:54:57 -07001380 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
1381
1382 if (flags & CMD_ASYNC)
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001383 cmd_out.meta.u.callback = iwl4965_sensitivity_callback;
Zhu Yib481de92007-09-25 17:54:57 -07001384
1385 /* Don't send command to uCode if nothing has changed */
1386 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
1387 sizeof(u16)*HD_TABLE_SIZE)) {
1388 IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n");
1389 return 0;
1390 }
1391
1392 /* Copy table for comparison next time */
1393 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
1394 sizeof(u16)*HD_TABLE_SIZE);
1395
Tomas Winkler857485c2008-03-21 13:53:44 -07001396 ret = iwl_send_cmd(priv, &cmd_out);
1397 if (ret)
1398 IWL_ERROR("SENSITIVITY_CMD failed\n");
Zhu Yib481de92007-09-25 17:54:57 -07001399
Tomas Winkler857485c2008-03-21 13:53:44 -07001400 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001401}
1402
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001403void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags, u8 force)
Zhu Yib481de92007-09-25 17:54:57 -07001404{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001405 struct iwl4965_sensitivity_data *data = NULL;
Tomas Winkler857485c2008-03-21 13:53:44 -07001406 int i;
1407 int ret = 0;
Zhu Yib481de92007-09-25 17:54:57 -07001408
1409 IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n");
1410
1411 if (force)
1412 memset(&(priv->sensitivity_tbl[0]), 0,
1413 sizeof(u16)*HD_TABLE_SIZE);
1414
1415 /* Clear driver's sensitivity algo data */
1416 data = &(priv->sensitivity_data);
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001417 memset(data, 0, sizeof(struct iwl4965_sensitivity_data));
Zhu Yib481de92007-09-25 17:54:57 -07001418
1419 data->num_in_cck_no_fa = 0;
1420 data->nrg_curr_state = IWL_FA_TOO_MANY;
1421 data->nrg_prev_state = IWL_FA_TOO_MANY;
1422 data->nrg_silence_ref = 0;
1423 data->nrg_silence_idx = 0;
1424 data->nrg_energy_idx = 0;
1425
1426 for (i = 0; i < 10; i++)
1427 data->nrg_value[i] = 0;
1428
1429 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
1430 data->nrg_silence_rssi[i] = 0;
1431
1432 data->auto_corr_ofdm = 90;
1433 data->auto_corr_ofdm_mrc = 170;
1434 data->auto_corr_ofdm_x1 = 105;
1435 data->auto_corr_ofdm_mrc_x1 = 220;
1436 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
1437 data->auto_corr_cck_mrc = 200;
1438 data->nrg_th_cck = 100;
1439 data->nrg_th_ofdm = 100;
1440
1441 data->last_bad_plcp_cnt_ofdm = 0;
1442 data->last_fa_cnt_ofdm = 0;
1443 data->last_bad_plcp_cnt_cck = 0;
1444 data->last_fa_cnt_cck = 0;
1445
1446 /* Clear prior Sensitivity command data to force send to uCode */
1447 if (force)
1448 memset(&(priv->sensitivity_tbl[0]), 0,
1449 sizeof(u16)*HD_TABLE_SIZE);
1450
Tomas Winkler857485c2008-03-21 13:53:44 -07001451 ret |= iwl4965_sensitivity_write(priv, flags);
1452 IWL_DEBUG_CALIB("<<return 0x%X\n", ret);
Zhu Yib481de92007-09-25 17:54:57 -07001453
1454 return;
1455}
1456
1457
1458/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
1459 * Called after every association, but this runs only once!
1460 * ... once chain noise is calibrated the first time, it's good forever. */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001461void iwl4965_chain_noise_reset(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001462{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001463 struct iwl4965_chain_noise_data *data = NULL;
Zhu Yib481de92007-09-25 17:54:57 -07001464
1465 data = &(priv->chain_noise_data);
Tomas Winkler3109ece2008-03-28 16:33:35 -07001466 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001467 struct iwl4965_calibration_cmd cmd;
Zhu Yib481de92007-09-25 17:54:57 -07001468
1469 memset(&cmd, 0, sizeof(cmd));
1470 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1471 cmd.diff_gain_a = 0;
1472 cmd.diff_gain_b = 0;
1473 cmd.diff_gain_c = 0;
Tomas Winklere5472972008-03-28 16:21:12 -07001474 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
1475 sizeof(cmd), &cmd, NULL);
Zhu Yib481de92007-09-25 17:54:57 -07001476 msleep(4);
1477 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1478 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
1479 }
1480 return;
1481}
1482
1483/*
1484 * Accumulate 20 beacons of signal and noise statistics for each of
1485 * 3 receivers/antennas/rx-chains, then figure out:
1486 * 1) Which antennas are connected.
1487 * 2) Differential rx gain settings to balance the 3 receivers.
1488 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001489static void iwl4965_noise_calibration(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001490 struct iwl4965_notif_statistics *stat_resp)
Zhu Yib481de92007-09-25 17:54:57 -07001491{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001492 struct iwl4965_chain_noise_data *data = NULL;
Tomas Winkler857485c2008-03-21 13:53:44 -07001493 int ret = 0;
Zhu Yib481de92007-09-25 17:54:57 -07001494
1495 u32 chain_noise_a;
1496 u32 chain_noise_b;
1497 u32 chain_noise_c;
1498 u32 chain_sig_a;
1499 u32 chain_sig_b;
1500 u32 chain_sig_c;
1501 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1502 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
1503 u32 max_average_sig;
1504 u16 max_average_sig_antenna_i;
1505 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
1506 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
1507 u16 i = 0;
1508 u16 chan_num = INITIALIZATION_VALUE;
1509 u32 band = INITIALIZATION_VALUE;
1510 u32 active_chains = 0;
1511 unsigned long flags;
1512 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
1513
1514 data = &(priv->chain_noise_data);
1515
1516 /* Accumulate just the first 20 beacons after the first association,
1517 * then we're done forever. */
1518 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
1519 if (data->state == IWL_CHAIN_NOISE_ALIVE)
1520 IWL_DEBUG_CALIB("Wait for noise calib reset\n");
1521 return;
1522 }
1523
1524 spin_lock_irqsave(&priv->lock, flags);
1525 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1526 IWL_DEBUG_CALIB(" << Interference data unavailable\n");
1527 spin_unlock_irqrestore(&priv->lock, flags);
1528 return;
1529 }
1530
1531 band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1;
1532 chan_num = le16_to_cpu(priv->staging_rxon.channel);
1533
1534 /* Make sure we accumulate data for just the associated channel
1535 * (even if scanning). */
1536 if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) ||
1537 ((STATISTICS_REPLY_FLG_BAND_24G_MSK ==
1538 (stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) {
1539 IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n",
1540 chan_num, band);
1541 spin_unlock_irqrestore(&priv->lock, flags);
1542 return;
1543 }
1544
1545 /* Accumulate beacon statistics values across 20 beacons */
1546 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
1547 IN_BAND_FILTER;
1548 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
1549 IN_BAND_FILTER;
1550 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
1551 IN_BAND_FILTER;
1552
1553 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
1554 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
1555 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
1556
1557 spin_unlock_irqrestore(&priv->lock, flags);
1558
1559 data->beacon_count++;
1560
1561 data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
1562 data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
1563 data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
1564
1565 data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
1566 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
1567 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
1568
1569 IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band,
1570 data->beacon_count);
1571 IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n",
1572 chain_sig_a, chain_sig_b, chain_sig_c);
1573 IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n",
1574 chain_noise_a, chain_noise_b, chain_noise_c);
1575
1576 /* If this is the 20th beacon, determine:
1577 * 1) Disconnected antennas (using signal strengths)
1578 * 2) Differential gain (using silence noise) to balance receivers */
1579 if (data->beacon_count == CAL_NUM_OF_BEACONS) {
1580
1581 /* Analyze signal for disconnected antenna */
1582 average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS;
1583 average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS;
1584 average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS;
1585
1586 if (average_sig[0] >= average_sig[1]) {
1587 max_average_sig = average_sig[0];
1588 max_average_sig_antenna_i = 0;
1589 active_chains = (1 << max_average_sig_antenna_i);
1590 } else {
1591 max_average_sig = average_sig[1];
1592 max_average_sig_antenna_i = 1;
1593 active_chains = (1 << max_average_sig_antenna_i);
1594 }
1595
1596 if (average_sig[2] >= max_average_sig) {
1597 max_average_sig = average_sig[2];
1598 max_average_sig_antenna_i = 2;
1599 active_chains = (1 << max_average_sig_antenna_i);
1600 }
1601
1602 IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n",
1603 average_sig[0], average_sig[1], average_sig[2]);
1604 IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n",
1605 max_average_sig, max_average_sig_antenna_i);
1606
1607 /* Compare signal strengths for all 3 receivers. */
1608 for (i = 0; i < NUM_RX_CHAINS; i++) {
1609 if (i != max_average_sig_antenna_i) {
1610 s32 rssi_delta = (max_average_sig -
1611 average_sig[i]);
1612
1613 /* If signal is very weak, compared with
1614 * strongest, mark it as disconnected. */
1615 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
1616 data->disconn_array[i] = 1;
1617 else
1618 active_chains |= (1 << i);
1619 IWL_DEBUG_CALIB("i = %d rssiDelta = %d "
1620 "disconn_array[i] = %d\n",
1621 i, rssi_delta, data->disconn_array[i]);
1622 }
1623 }
1624
1625 /*If both chains A & B are disconnected -
1626 * connect B and leave A as is */
1627 if (data->disconn_array[CHAIN_A] &&
1628 data->disconn_array[CHAIN_B]) {
1629 data->disconn_array[CHAIN_B] = 0;
1630 active_chains |= (1 << CHAIN_B);
1631 IWL_DEBUG_CALIB("both A & B chains are disconnected! "
1632 "W/A - declare B as connected\n");
1633 }
1634
1635 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
1636 active_chains);
1637
1638 /* Save for use within RXON, TX, SCAN commands, etc. */
1639 priv->valid_antenna = active_chains;
1640
1641 /* Analyze noise for rx balance */
1642 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
1643 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
1644 average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS);
1645
1646 for (i = 0; i < NUM_RX_CHAINS; i++) {
1647 if (!(data->disconn_array[i]) &&
1648 (average_noise[i] <= min_average_noise)) {
1649 /* This means that chain i is active and has
1650 * lower noise values so far: */
1651 min_average_noise = average_noise[i];
1652 min_average_noise_antenna_i = i;
1653 }
1654 }
1655
1656 data->delta_gain_code[min_average_noise_antenna_i] = 0;
1657
1658 IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n",
1659 average_noise[0], average_noise[1],
1660 average_noise[2]);
1661
1662 IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n",
1663 min_average_noise, min_average_noise_antenna_i);
1664
1665 for (i = 0; i < NUM_RX_CHAINS; i++) {
1666 s32 delta_g = 0;
1667
1668 if (!(data->disconn_array[i]) &&
1669 (data->delta_gain_code[i] ==
1670 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
1671 delta_g = average_noise[i] - min_average_noise;
1672 data->delta_gain_code[i] = (u8)((delta_g *
1673 10) / 15);
1674 if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE <
1675 data->delta_gain_code[i])
1676 data->delta_gain_code[i] =
1677 CHAIN_NOISE_MAX_DELTA_GAIN_CODE;
1678
1679 data->delta_gain_code[i] =
1680 (data->delta_gain_code[i] | (1 << 2));
1681 } else
1682 data->delta_gain_code[i] = 0;
1683 }
1684 IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
1685 data->delta_gain_code[0],
1686 data->delta_gain_code[1],
1687 data->delta_gain_code[2]);
1688
1689 /* Differential gain gets sent to uCode only once */
1690 if (!data->radio_write) {
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001691 struct iwl4965_calibration_cmd cmd;
Zhu Yib481de92007-09-25 17:54:57 -07001692 data->radio_write = 1;
1693
1694 memset(&cmd, 0, sizeof(cmd));
1695 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
1696 cmd.diff_gain_a = data->delta_gain_code[0];
1697 cmd.diff_gain_b = data->delta_gain_code[1];
1698 cmd.diff_gain_c = data->delta_gain_code[2];
Tomas Winkler857485c2008-03-21 13:53:44 -07001699 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
Zhu Yib481de92007-09-25 17:54:57 -07001700 sizeof(cmd), &cmd);
Tomas Winkler857485c2008-03-21 13:53:44 -07001701 if (ret)
Zhu Yib481de92007-09-25 17:54:57 -07001702 IWL_DEBUG_CALIB("fail sending cmd "
1703 "REPLY_PHY_CALIBRATION_CMD \n");
1704
1705 /* TODO we might want recalculate
1706 * rx_chain in rxon cmd */
1707
1708 /* Mark so we run this algo only once! */
1709 data->state = IWL_CHAIN_NOISE_CALIBRATED;
1710 }
1711 data->chain_noise_a = 0;
1712 data->chain_noise_b = 0;
1713 data->chain_noise_c = 0;
1714 data->chain_signal_a = 0;
1715 data->chain_signal_b = 0;
1716 data->chain_signal_c = 0;
1717 data->beacon_count = 0;
1718 }
1719 return;
1720}
1721
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001722static void iwl4965_sensitivity_calibration(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001723 struct iwl4965_notif_statistics *resp)
Zhu Yib481de92007-09-25 17:54:57 -07001724{
Zhu Yib481de92007-09-25 17:54:57 -07001725 u32 rx_enable_time;
1726 u32 fa_cck;
1727 u32 fa_ofdm;
1728 u32 bad_plcp_cck;
1729 u32 bad_plcp_ofdm;
1730 u32 norm_fa_ofdm;
1731 u32 norm_fa_cck;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001732 struct iwl4965_sensitivity_data *data = NULL;
Zhu Yib481de92007-09-25 17:54:57 -07001733 struct statistics_rx_non_phy *rx_info = &(resp->rx.general);
1734 struct statistics_rx *statistics = &(resp->rx);
1735 unsigned long flags;
1736 struct statistics_general_data statis;
Tomas Winkler857485c2008-03-21 13:53:44 -07001737 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001738
1739 data = &(priv->sensitivity_data);
1740
Tomas Winkler3109ece2008-03-28 16:33:35 -07001741 if (!iwl_is_associated(priv)) {
Zhu Yib481de92007-09-25 17:54:57 -07001742 IWL_DEBUG_CALIB("<< - not associated\n");
1743 return;
1744 }
1745
1746 spin_lock_irqsave(&priv->lock, flags);
1747 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
1748 IWL_DEBUG_CALIB("<< invalid data.\n");
1749 spin_unlock_irqrestore(&priv->lock, flags);
1750 return;
1751 }
1752
1753 /* Extract Statistics: */
1754 rx_enable_time = le32_to_cpu(rx_info->channel_load);
1755 fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt);
1756 fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt);
1757 bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err);
1758 bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err);
1759
1760 statis.beacon_silence_rssi_a =
1761 le32_to_cpu(statistics->general.beacon_silence_rssi_a);
1762 statis.beacon_silence_rssi_b =
1763 le32_to_cpu(statistics->general.beacon_silence_rssi_b);
1764 statis.beacon_silence_rssi_c =
1765 le32_to_cpu(statistics->general.beacon_silence_rssi_c);
1766 statis.beacon_energy_a =
1767 le32_to_cpu(statistics->general.beacon_energy_a);
1768 statis.beacon_energy_b =
1769 le32_to_cpu(statistics->general.beacon_energy_b);
1770 statis.beacon_energy_c =
1771 le32_to_cpu(statistics->general.beacon_energy_c);
1772
1773 spin_unlock_irqrestore(&priv->lock, flags);
1774
1775 IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
1776
1777 if (!rx_enable_time) {
1778 IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n");
1779 return;
1780 }
1781
1782 /* These statistics increase monotonically, and do not reset
1783 * at each beacon. Calculate difference from last value, or just
1784 * use the new statistics value if it has reset or wrapped around. */
1785 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
1786 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
1787 else {
1788 bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
1789 data->last_bad_plcp_cnt_cck += bad_plcp_cck;
1790 }
1791
1792 if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
1793 data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
1794 else {
1795 bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
1796 data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
1797 }
1798
1799 if (data->last_fa_cnt_ofdm > fa_ofdm)
1800 data->last_fa_cnt_ofdm = fa_ofdm;
1801 else {
1802 fa_ofdm -= data->last_fa_cnt_ofdm;
1803 data->last_fa_cnt_ofdm += fa_ofdm;
1804 }
1805
1806 if (data->last_fa_cnt_cck > fa_cck)
1807 data->last_fa_cnt_cck = fa_cck;
1808 else {
1809 fa_cck -= data->last_fa_cnt_cck;
1810 data->last_fa_cnt_cck += fa_cck;
1811 }
1812
1813 /* Total aborted signal locks */
1814 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
1815 norm_fa_cck = fa_cck + bad_plcp_cck;
1816
1817 IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
1818 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
1819
1820 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
1821 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
Tomas Winkler857485c2008-03-21 13:53:44 -07001822 ret = iwl4965_sensitivity_write(priv, CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -07001823
1824 return;
1825}
1826
1827static void iwl4965_bg_sensitivity_work(struct work_struct *work)
1828{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001829 struct iwl_priv *priv = container_of(work, struct iwl_priv,
Zhu Yib481de92007-09-25 17:54:57 -07001830 sensitivity_work);
1831
1832 mutex_lock(&priv->mutex);
1833
1834 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1835 test_bit(STATUS_SCANNING, &priv->status)) {
1836 mutex_unlock(&priv->mutex);
1837 return;
1838 }
1839
1840 if (priv->start_calib) {
1841 iwl4965_noise_calibration(priv, &priv->statistics);
1842
1843 if (priv->sensitivity_data.state ==
1844 IWL_SENS_CALIB_NEED_REINIT) {
1845 iwl4965_init_sensitivity(priv, CMD_ASYNC, 0);
1846 priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED;
1847 } else
1848 iwl4965_sensitivity_calibration(priv,
1849 &priv->statistics);
1850 }
1851
1852 mutex_unlock(&priv->mutex);
1853 return;
1854}
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08001855#endif /*CONFIG_IWL4965_SENSITIVITY*/
Zhu Yib481de92007-09-25 17:54:57 -07001856
1857static void iwl4965_bg_txpower_work(struct work_struct *work)
1858{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001859 struct iwl_priv *priv = container_of(work, struct iwl_priv,
Zhu Yib481de92007-09-25 17:54:57 -07001860 txpower_work);
1861
1862 /* If a scan happened to start before we got here
1863 * then just return; the statistics notification will
1864 * kick off another scheduled work to compensate for
1865 * any temperature delta we missed here. */
1866 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
1867 test_bit(STATUS_SCANNING, &priv->status))
1868 return;
1869
1870 mutex_lock(&priv->mutex);
1871
1872 /* Regardless of if we are assocaited, we must reconfigure the
1873 * TX power since frames can be sent on non-radar channels while
1874 * not associated */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001875 iwl4965_hw_reg_send_txpower(priv);
Zhu Yib481de92007-09-25 17:54:57 -07001876
1877 /* Update last_temperature to keep is_calib_needed from running
1878 * when it isn't needed... */
1879 priv->last_temperature = priv->temperature;
1880
1881 mutex_unlock(&priv->mutex);
1882}
1883
1884/*
1885 * Acquire priv->lock before calling this function !
1886 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001887static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
Zhu Yib481de92007-09-25 17:54:57 -07001888{
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001889 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
Zhu Yib481de92007-09-25 17:54:57 -07001890 (index & 0xff) | (txq_id << 8));
Tomas Winkler12a81f62008-04-03 16:05:20 -07001891 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
Zhu Yib481de92007-09-25 17:54:57 -07001892}
1893
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001894/**
1895 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
1896 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
1897 * @scd_retry: (1) Indicates queue will be used in aggregation mode
1898 *
1899 * NOTE: Acquire priv->lock before calling this function !
Zhu Yib481de92007-09-25 17:54:57 -07001900 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001901static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001902 struct iwl4965_tx_queue *txq,
Zhu Yib481de92007-09-25 17:54:57 -07001903 int tx_fifo_id, int scd_retry)
1904{
1905 int txq_id = txq->q.id;
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001906
1907 /* Find out whether to activate Tx queue */
Zhu Yib481de92007-09-25 17:54:57 -07001908 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
1909
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001910 /* Set up and activate */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001911 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
Zhu Yib481de92007-09-25 17:54:57 -07001912 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1913 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
1914 (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) |
1915 (scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
1916 SCD_QUEUE_STTS_REG_MSK);
1917
1918 txq->sched_retry = scd_retry;
1919
1920 IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001921 active ? "Activate" : "Deactivate",
Zhu Yib481de92007-09-25 17:54:57 -07001922 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
1923}
1924
1925static const u16 default_queue_to_tx_fifo[] = {
1926 IWL_TX_FIFO_AC3,
1927 IWL_TX_FIFO_AC2,
1928 IWL_TX_FIFO_AC1,
1929 IWL_TX_FIFO_AC0,
1930 IWL_CMD_FIFO_NUM,
1931 IWL_TX_FIFO_HCCA_1,
1932 IWL_TX_FIFO_HCCA_2
1933};
1934
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001935static inline void iwl4965_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
Zhu Yib481de92007-09-25 17:54:57 -07001936{
1937 set_bit(txq_id, &priv->txq_ctx_active_msk);
1938}
1939
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001940static inline void iwl4965_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
Zhu Yib481de92007-09-25 17:54:57 -07001941{
1942 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1943}
1944
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001945int iwl4965_alive_notify(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07001946{
1947 u32 a;
1948 int i = 0;
1949 unsigned long flags;
Tomas Winkler857485c2008-03-21 13:53:44 -07001950 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07001951
1952 spin_lock_irqsave(&priv->lock, flags);
1953
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08001954#ifdef CONFIG_IWL4965_SENSITIVITY
Zhu Yib481de92007-09-25 17:54:57 -07001955 memset(&(priv->sensitivity_data), 0,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001956 sizeof(struct iwl4965_sensitivity_data));
Zhu Yib481de92007-09-25 17:54:57 -07001957 memset(&(priv->chain_noise_data), 0,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001958 sizeof(struct iwl4965_chain_noise_data));
Zhu Yib481de92007-09-25 17:54:57 -07001959 for (i = 0; i < NUM_RX_CHAINS; i++)
1960 priv->chain_noise_data.delta_gain_code[i] =
1961 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08001962#endif /* CONFIG_IWL4965_SENSITIVITY*/
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001963 ret = iwl_grab_nic_access(priv);
Tomas Winkler857485c2008-03-21 13:53:44 -07001964 if (ret) {
Zhu Yib481de92007-09-25 17:54:57 -07001965 spin_unlock_irqrestore(&priv->lock, flags);
Tomas Winkler857485c2008-03-21 13:53:44 -07001966 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07001967 }
1968
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001969 /* Clear 4965's internal Tx Scheduler data base */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001970 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
Zhu Yib481de92007-09-25 17:54:57 -07001971 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET;
1972 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4)
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001973 iwl_write_targ_mem(priv, a, 0);
Zhu Yib481de92007-09-25 17:54:57 -07001974 for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4)
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001975 iwl_write_targ_mem(priv, a, 0);
Zhu Yib481de92007-09-25 17:54:57 -07001976 for (; a < sizeof(u16) * priv->hw_setting.max_txq_num; a += 4)
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001977 iwl_write_targ_mem(priv, a, 0);
Zhu Yib481de92007-09-25 17:54:57 -07001978
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001979 /* Tel 4965 where to find Tx byte count tables */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001980 iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
Tomas Winkler059ff822008-04-14 21:16:14 -07001981 (priv->shared_phys +
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001982 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001983
1984 /* Disable chain mode for all queues */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001985 iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
Zhu Yib481de92007-09-25 17:54:57 -07001986
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001987 /* Initialize each Tx queue (including the command queue) */
Zhu Yib481de92007-09-25 17:54:57 -07001988 for (i = 0; i < priv->hw_setting.max_txq_num; i++) {
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001989
1990 /* TFD circular buffer read/write indexes */
Tomas Winkler12a81f62008-04-03 16:05:20 -07001991 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001992 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08001993
1994 /* Max Tx Window size for Scheduler-ACK mode */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07001995 iwl_write_targ_mem(priv, priv->scd_base_addr +
Zhu Yib481de92007-09-25 17:54:57 -07001996 SCD_CONTEXT_QUEUE_OFFSET(i),
1997 (SCD_WIN_SIZE <<
1998 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1999 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002000
2001 /* Frame limit */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07002002 iwl_write_targ_mem(priv, priv->scd_base_addr +
Zhu Yib481de92007-09-25 17:54:57 -07002003 SCD_CONTEXT_QUEUE_OFFSET(i) +
2004 sizeof(u32),
2005 (SCD_FRAME_LIMIT <<
2006 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2007 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
2008
2009 }
Tomas Winkler12a81f62008-04-03 16:05:20 -07002010 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
Zhu Yib481de92007-09-25 17:54:57 -07002011 (1 << priv->hw_setting.max_txq_num) - 1);
2012
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002013 /* Activate all Tx DMA/FIFO channels */
Tomas Winkler12a81f62008-04-03 16:05:20 -07002014 iwl_write_prph(priv, IWL49_SCD_TXFACT,
Zhu Yib481de92007-09-25 17:54:57 -07002015 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
2016
2017 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002018
2019 /* Map each Tx/cmd queue to its corresponding fifo */
Zhu Yib481de92007-09-25 17:54:57 -07002020 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
2021 int ac = default_queue_to_tx_fifo[i];
2022 iwl4965_txq_ctx_activate(priv, i);
2023 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
2024 }
2025
Tomas Winkler3395f6e2008-03-25 16:33:37 -07002026 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07002027 spin_unlock_irqrestore(&priv->lock, flags);
2028
Tomas Winkler857485c2008-03-21 13:53:44 -07002029 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07002030}
2031
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002032/**
2033 * iwl4965_hw_set_hw_setting
2034 *
2035 * Called when initializing driver
2036 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002037int iwl4965_hw_set_hw_setting(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002038{
Assaf Krauss316c30d2008-03-14 10:38:46 -07002039
Assaf Krauss1ea87392008-03-18 14:57:50 -07002040 if ((priv->cfg->mod_params->num_of_queues > IWL_MAX_NUM_QUEUES) ||
2041 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
Assaf Krauss316c30d2008-03-14 10:38:46 -07002042 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
2043 IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES);
Tomas Winkler059ff822008-04-14 21:16:14 -07002044 return -EINVAL;
Assaf Krauss316c30d2008-03-14 10:38:46 -07002045 }
2046
Assaf Krauss1ea87392008-03-18 14:57:50 -07002047 priv->hw_setting.max_txq_num = priv->cfg->mod_params->num_of_queues;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002048 priv->hw_setting.tx_cmd_len = sizeof(struct iwl4965_tx_cmd);
Zhu Yib481de92007-09-25 17:54:57 -07002049 priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE;
2050 priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG;
Assaf Krauss1ea87392008-03-18 14:57:50 -07002051 if (priv->cfg->mod_params->amsdu_size_8K)
Ron Rindjunsky9ee1ba42007-11-26 16:14:42 +02002052 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_8K;
2053 else
2054 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_4K;
2055 priv->hw_setting.max_pkt_size = priv->hw_setting.rx_buf_size - 256;
Zhu Yib481de92007-09-25 17:54:57 -07002056 priv->hw_setting.max_stations = IWL4965_STATION_COUNT;
2057 priv->hw_setting.bcast_sta_id = IWL4965_BROADCAST_ID;
Tomas Winkler3e82a822008-02-13 11:32:31 -08002058
2059 priv->hw_setting.tx_ant_num = 2;
2060
Tomas Winkler059ff822008-04-14 21:16:14 -07002061 return 0;
Zhu Yib481de92007-09-25 17:54:57 -07002062}
2063
2064/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002065 * iwl4965_hw_txq_ctx_free - Free TXQ Context
Zhu Yib481de92007-09-25 17:54:57 -07002066 *
2067 * Destroy all TX DMA queues and structures
2068 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002069void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002070{
2071 int txq_id;
2072
2073 /* Tx queues */
2074 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002075 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]);
Zhu Yib481de92007-09-25 17:54:57 -07002076
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002077 /* Keep-warm buffer */
Zhu Yib481de92007-09-25 17:54:57 -07002078 iwl4965_kw_free(priv);
2079}
2080
2081/**
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002082 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
Zhu Yib481de92007-09-25 17:54:57 -07002083 *
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002084 * Does NOT advance any TFD circular buffer read/write indexes
2085 * Does NOT free the TFD itself (which is within circular buffer)
Zhu Yib481de92007-09-25 17:54:57 -07002086 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002087int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
Zhu Yib481de92007-09-25 17:54:57 -07002088{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002089 struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0];
2090 struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
Zhu Yib481de92007-09-25 17:54:57 -07002091 struct pci_dev *dev = priv->pci_dev;
2092 int i;
2093 int counter = 0;
2094 int index, is_odd;
2095
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002096 /* Host command buffers stay mapped in memory, nothing to clean */
Zhu Yib481de92007-09-25 17:54:57 -07002097 if (txq->q.id == IWL_CMD_QUEUE_NUM)
Zhu Yib481de92007-09-25 17:54:57 -07002098 return 0;
2099
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002100 /* Sanity check on number of chunks */
Zhu Yib481de92007-09-25 17:54:57 -07002101 counter = IWL_GET_BITS(*bd, num_tbs);
2102 if (counter > MAX_NUM_OF_TBS) {
2103 IWL_ERROR("Too many chunks: %i\n", counter);
2104 /* @todo issue fatal error, it is quite serious situation */
2105 return 0;
2106 }
2107
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002108 /* Unmap chunks, if any.
2109 * TFD info for odd chunks is different format than for even chunks. */
Zhu Yib481de92007-09-25 17:54:57 -07002110 for (i = 0; i < counter; i++) {
2111 index = i / 2;
2112 is_odd = i & 0x1;
2113
2114 if (is_odd)
2115 pci_unmap_single(
2116 dev,
2117 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
2118 (IWL_GET_BITS(bd->pa[index],
2119 tb2_addr_hi20) << 16),
2120 IWL_GET_BITS(bd->pa[index], tb2_len),
2121 PCI_DMA_TODEVICE);
2122
2123 else if (i > 0)
2124 pci_unmap_single(dev,
2125 le32_to_cpu(bd->pa[index].tb1_addr),
2126 IWL_GET_BITS(bd->pa[index], tb1_len),
2127 PCI_DMA_TODEVICE);
2128
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002129 /* Free SKB, if any, for this chunk */
Tomas Winklerfc4b6852007-10-25 17:15:24 +08002130 if (txq->txb[txq->q.read_ptr].skb[i]) {
2131 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
Zhu Yib481de92007-09-25 17:54:57 -07002132
2133 dev_kfree_skb(skb);
Tomas Winklerfc4b6852007-10-25 17:15:24 +08002134 txq->txb[txq->q.read_ptr].skb[i] = NULL;
Zhu Yib481de92007-09-25 17:54:57 -07002135 }
2136 }
2137 return 0;
2138}
2139
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002140int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
Zhu Yib481de92007-09-25 17:54:57 -07002141{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002142 IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n");
Zhu Yib481de92007-09-25 17:54:57 -07002143 return -EINVAL;
2144}
2145
2146static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
2147{
2148 s32 sign = 1;
2149
2150 if (num < 0) {
2151 sign = -sign;
2152 num = -num;
2153 }
2154 if (denom < 0) {
2155 sign = -sign;
2156 denom = -denom;
2157 }
2158 *res = 1;
2159 *res = ((num * 2 + denom) / (denom * 2)) * sign;
2160
2161 return 1;
2162}
2163
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002164/**
2165 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
2166 *
2167 * Determines power supply voltage compensation for txpower calculations.
2168 * Returns number of 1/2-dB steps to subtract from gain table index,
2169 * to compensate for difference between power supply voltage during
2170 * factory measurements, vs. current power supply voltage.
2171 *
2172 * Voltage indication is higher for lower voltage.
2173 * Lower voltage requires more gain (lower gain table index).
2174 */
Zhu Yib481de92007-09-25 17:54:57 -07002175static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
2176 s32 current_voltage)
2177{
2178 s32 comp = 0;
2179
2180 if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
2181 (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
2182 return 0;
2183
2184 iwl4965_math_div_round(current_voltage - eeprom_voltage,
2185 TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
2186
2187 if (current_voltage > eeprom_voltage)
2188 comp *= 2;
2189 if ((comp < -2) || (comp > 2))
2190 comp = 0;
2191
2192 return comp;
2193}
2194
Assaf Kraussbf85ea42008-03-14 10:38:49 -07002195static const struct iwl_channel_info *
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002196iwl4965_get_channel_txpower_info(struct iwl_priv *priv,
Johannes Berg8318d782008-01-24 19:38:38 +01002197 enum ieee80211_band band, u16 channel)
Zhu Yib481de92007-09-25 17:54:57 -07002198{
Assaf Kraussbf85ea42008-03-14 10:38:49 -07002199 const struct iwl_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07002200
Assaf Krauss8622e702008-03-21 13:53:43 -07002201 ch_info = iwl_get_channel_info(priv, band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07002202
2203 if (!is_channel_valid(ch_info))
2204 return NULL;
2205
2206 return ch_info;
2207}
2208
2209static s32 iwl4965_get_tx_atten_grp(u16 channel)
2210{
2211 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
2212 channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
2213 return CALIB_CH_GROUP_5;
2214
2215 if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
2216 channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
2217 return CALIB_CH_GROUP_1;
2218
2219 if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
2220 channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
2221 return CALIB_CH_GROUP_2;
2222
2223 if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
2224 channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
2225 return CALIB_CH_GROUP_3;
2226
2227 if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
2228 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
2229 return CALIB_CH_GROUP_4;
2230
2231 IWL_ERROR("Can't find txatten group for channel %d.\n", channel);
2232 return -1;
2233}
2234
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002235static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
Zhu Yib481de92007-09-25 17:54:57 -07002236{
2237 s32 b = -1;
2238
2239 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
2240 if (priv->eeprom.calib_info.band_info[b].ch_from == 0)
2241 continue;
2242
2243 if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from)
2244 && (channel <= priv->eeprom.calib_info.band_info[b].ch_to))
2245 break;
2246 }
2247
2248 return b;
2249}
2250
2251static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
2252{
2253 s32 val;
2254
2255 if (x2 == x1)
2256 return y1;
2257 else {
2258 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
2259 return val + y2;
2260 }
2261}
2262
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002263/**
2264 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
2265 *
2266 * Interpolates factory measurements from the two sample channels within a
2267 * sub-band, to apply to channel of interest. Interpolation is proportional to
2268 * differences in channel frequencies, which is proportional to differences
2269 * in channel number.
2270 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002271static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002272 struct iwl4965_eeprom_calib_ch_info *chan_info)
Zhu Yib481de92007-09-25 17:54:57 -07002273{
2274 s32 s = -1;
2275 u32 c;
2276 u32 m;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002277 const struct iwl4965_eeprom_calib_measure *m1;
2278 const struct iwl4965_eeprom_calib_measure *m2;
2279 struct iwl4965_eeprom_calib_measure *omeas;
Zhu Yib481de92007-09-25 17:54:57 -07002280 u32 ch_i1;
2281 u32 ch_i2;
2282
2283 s = iwl4965_get_sub_band(priv, channel);
2284 if (s >= EEPROM_TX_POWER_BANDS) {
2285 IWL_ERROR("Tx Power can not find channel %d ", channel);
2286 return -1;
2287 }
2288
2289 ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num;
2290 ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num;
2291 chan_info->ch_num = (u8) channel;
2292
2293 IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
2294 channel, s, ch_i1, ch_i2);
2295
2296 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
2297 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
2298 m1 = &(priv->eeprom.calib_info.band_info[s].ch1.
2299 measurements[c][m]);
2300 m2 = &(priv->eeprom.calib_info.band_info[s].ch2.
2301 measurements[c][m]);
2302 omeas = &(chan_info->measurements[c][m]);
2303
2304 omeas->actual_pow =
2305 (u8) iwl4965_interpolate_value(channel, ch_i1,
2306 m1->actual_pow,
2307 ch_i2,
2308 m2->actual_pow);
2309 omeas->gain_idx =
2310 (u8) iwl4965_interpolate_value(channel, ch_i1,
2311 m1->gain_idx, ch_i2,
2312 m2->gain_idx);
2313 omeas->temperature =
2314 (u8) iwl4965_interpolate_value(channel, ch_i1,
2315 m1->temperature,
2316 ch_i2,
2317 m2->temperature);
2318 omeas->pa_det =
2319 (s8) iwl4965_interpolate_value(channel, ch_i1,
2320 m1->pa_det, ch_i2,
2321 m2->pa_det);
2322
2323 IWL_DEBUG_TXPOWER
2324 ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
2325 m1->actual_pow, m2->actual_pow, omeas->actual_pow);
2326 IWL_DEBUG_TXPOWER
2327 ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
2328 m1->gain_idx, m2->gain_idx, omeas->gain_idx);
2329 IWL_DEBUG_TXPOWER
2330 ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
2331 m1->pa_det, m2->pa_det, omeas->pa_det);
2332 IWL_DEBUG_TXPOWER
2333 ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
2334 m1->temperature, m2->temperature,
2335 omeas->temperature);
2336 }
2337 }
2338
2339 return 0;
2340}
2341
2342/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
2343 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
2344static s32 back_off_table[] = {
2345 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
2346 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
2347 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
2348 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
2349 10 /* CCK */
2350};
2351
/* Thermal compensation values for txpower for various frequency ranges ...
 *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust.
 * Indexed by txatten group (see iwl4965_get_tx_atten_grp()); used by
 * iwl4965_fill_txpower_tbl() to turn a temperature delta into half-dB
 * gain steps via num/denom rounding division. */
static struct iwl4965_txpower_comp_entry {
	s32 degrees_per_05db_a;		/* numerator: degrees C per half-dB */
	s32 degrees_per_05db_a_denom;	/* denominator of the ratio */
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
	{9, 2},			/* group 0 5.2, ch 34-43 */
	{4, 1},			/* group 1 5.2, ch 44-70 */
	{4, 1},			/* group 2 5.2, ch 71-124 */
	{4, 1},			/* group 3 5.2, ch 125-200 */
	{3, 1}			/* group 4 2.4, ch all */
};
2364
2365static s32 get_min_power_index(s32 rate_power_index, u32 band)
2366{
2367 if (!band) {
2368 if ((rate_power_index & 7) <= 4)
2369 return MIN_TX_GAIN_INDEX_52GHZ_EXT;
2370 }
2371 return MIN_TX_GAIN_INDEX;
2372}
2373
/* One txpower gain-table entry: values programmed per Tx chain.
 * 'dsp' feeds dsp_predis_atten[], 'radio' feeds radio_tx_gain[] in the
 * txpower command (see iwl4965_fill_txpower_tbl()). */
struct gain_entry {
	u8 dsp;		/* DSP pre-distortion attenuation */
	u8 radio;	/* RF/radio gain setting */
};
2378
/* Gain settings indexed by [band][power_index]:
 * band 0 = 5.2 GHz, band 1 = 2.4 GHz (matches "band" in
 * iwl4965_fill_txpower_tbl()); higher index = lower txpower.
 * NOTE(review): the radio gain values skip some codes (e.g. 0x2F-0x26,
 * 0x21-0x19, 0x11-0x09 in the 5.2 GHz table) — presumably per factory
 * characterization; do not "fix" the sequence. */
static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain index table */
	{
	 {123, 0x3F},		/* highest txpower */
	 {117, 0x3F},
	 {110, 0x3F},
	 {104, 0x3F},
	 {98, 0x3F},
	 {110, 0x3E},
	 {104, 0x3E},
	 {98, 0x3E},
	 {110, 0x3D},
	 {104, 0x3D},
	 {98, 0x3D},
	 {110, 0x3C},
	 {104, 0x3C},
	 {98, 0x3C},
	 {110, 0x3B},
	 {104, 0x3B},
	 {98, 0x3B},
	 {110, 0x3A},
	 {104, 0x3A},
	 {98, 0x3A},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x25},
	 {104, 0x25},
	 {98, 0x25},
	 {110, 0x24},
	 {104, 0x24},
	 {98, 0x24},
	 {110, 0x23},
	 {104, 0x23},
	 {98, 0x23},
	 {110, 0x22},
	 {104, 0x18},
	 {98, 0x18},
	 {110, 0x17},
	 {104, 0x17},
	 {98, 0x17},
	 {110, 0x16},
	 {104, 0x16},
	 {98, 0x16},
	 {110, 0x15},
	 {104, 0x15},
	 {98, 0x15},
	 {110, 0x14},
	 {104, 0x14},
	 {98, 0x14},
	 {110, 0x13},
	 {104, 0x13},
	 {98, 0x13},
	 {110, 0x12},
	 {104, 0x08},
	 {98, 0x08},
	 {110, 0x07},
	 {104, 0x07},
	 {98, 0x07},
	 {110, 0x06},
	 {104, 0x06},
	 {98, 0x06},
	 {110, 0x05},
	 {104, 0x05},
	 {98, 0x05},
	 {110, 0x04},
	 {104, 0x04},
	 {98, 0x04},
	 {110, 0x03},
	 {104, 0x03},
	 {98, 0x03},
	 {110, 0x02},
	 {104, 0x02},
	 {98, 0x02},
	 {110, 0x01},
	 {104, 0x01},
	 {98, 0x01},
	 {110, 0x00},
	 {104, 0x00},
	 {98, 0x00},
	 {93, 0x00},
	 {88, 0x00},
	 {83, 0x00},
	 {78, 0x00},
	 },
	/* 2.4GHz power gain index table */
	{
	 {110, 0x3f},		/* highest txpower */
	 {104, 0x3f},
	 {98, 0x3f},
	 {110, 0x3e},
	 {104, 0x3e},
	 {98, 0x3e},
	 {110, 0x3d},
	 {104, 0x3d},
	 {98, 0x3d},
	 {110, 0x3c},
	 {104, 0x3c},
	 {98, 0x3c},
	 {110, 0x3b},
	 {104, 0x3b},
	 {98, 0x3b},
	 {110, 0x3a},
	 {104, 0x3a},
	 {98, 0x3a},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x6},
	 {104, 0x6},
	 {98, 0x6},
	 {110, 0x5},
	 {104, 0x5},
	 {98, 0x5},
	 {110, 0x4},
	 {104, 0x4},
	 {98, 0x4},
	 {110, 0x3},
	 {104, 0x3},
	 {98, 0x3},
	 {110, 0x2},
	 {104, 0x2},
	 {98, 0x2},
	 {110, 0x1},
	 {104, 0x1},
	 {98, 0x1},
	 {110, 0x0},
	 {104, 0x0},
	 {98, 0x0},
	 {97, 0},
	 {96, 0},
	 {95, 0},
	 {94, 0},
	 {93, 0},
	 {92, 0},
	 {91, 0},
	 {90, 0},
	 {89, 0},
	 {88, 0},
	 {87, 0},
	 {86, 0},
	 {85, 0},
	 {84, 0},
	 {83, 0},
	 {82, 0},
	 {81, 0},
	 {80, 0},
	 {79, 0},
	 {78, 0},
	 {77, 0},
	 {76, 0},
	 {75, 0},
	 {74, 0},
	 {73, 0},
	 {72, 0},
	 {71, 0},
	 {70, 0},
	 {69, 0},
	 {68, 0},
	 {67, 0},
	 {66, 0},
	 {65, 0},
	 {64, 0},
	 {63, 0},
	 {62, 0},
	 {61, 0},
	 {60, 0},
	 {59, 0},
	 }
};
2603
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002604static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
Zhu Yib481de92007-09-25 17:54:57 -07002605 u8 is_fat, u8 ctrl_chan_high,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002606 struct iwl4965_tx_power_db *tx_power_tbl)
Zhu Yib481de92007-09-25 17:54:57 -07002607{
2608 u8 saturation_power;
2609 s32 target_power;
2610 s32 user_target_power;
2611 s32 power_limit;
2612 s32 current_temp;
2613 s32 reg_limit;
2614 s32 current_regulatory;
2615 s32 txatten_grp = CALIB_CH_GROUP_MAX;
2616 int i;
2617 int c;
Assaf Kraussbf85ea42008-03-14 10:38:49 -07002618 const struct iwl_channel_info *ch_info = NULL;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002619 struct iwl4965_eeprom_calib_ch_info ch_eeprom_info;
2620 const struct iwl4965_eeprom_calib_measure *measurement;
Zhu Yib481de92007-09-25 17:54:57 -07002621 s16 voltage;
2622 s32 init_voltage;
2623 s32 voltage_compensation;
2624 s32 degrees_per_05db_num;
2625 s32 degrees_per_05db_denom;
2626 s32 factory_temp;
2627 s32 temperature_comp[2];
2628 s32 factory_gain_index[2];
2629 s32 factory_actual_pwr[2];
2630 s32 power_index;
2631
2632 /* Sanity check requested level (dBm) */
2633 if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) {
2634 IWL_WARNING("Requested user TXPOWER %d below limit.\n",
2635 priv->user_txpower_limit);
2636 return -EINVAL;
2637 }
2638 if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) {
2639 IWL_WARNING("Requested user TXPOWER %d above limit.\n",
2640 priv->user_txpower_limit);
2641 return -EINVAL;
2642 }
2643
2644 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
2645 * are used for indexing into txpower table) */
2646 user_target_power = 2 * priv->user_txpower_limit;
2647
2648 /* Get current (RXON) channel, band, width */
2649 ch_info =
Johannes Berg8318d782008-01-24 19:38:38 +01002650 iwl4965_get_channel_txpower_info(priv, priv->band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07002651
2652 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
2653 is_fat);
2654
2655 if (!ch_info)
2656 return -EINVAL;
2657
2658 /* get txatten group, used to select 1) thermal txpower adjustment
2659 * and 2) mimo txpower balance between Tx chains. */
2660 txatten_grp = iwl4965_get_tx_atten_grp(channel);
2661 if (txatten_grp < 0)
2662 return -EINVAL;
2663
2664 IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n",
2665 channel, txatten_grp);
2666
2667 if (is_fat) {
2668 if (ctrl_chan_high)
2669 channel -= 2;
2670 else
2671 channel += 2;
2672 }
2673
2674 /* hardware txpower limits ...
2675 * saturation (clipping distortion) txpowers are in half-dBm */
2676 if (band)
2677 saturation_power = priv->eeprom.calib_info.saturation_power24;
2678 else
2679 saturation_power = priv->eeprom.calib_info.saturation_power52;
2680
2681 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
2682 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
2683 if (band)
2684 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
2685 else
2686 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
2687 }
2688
2689 /* regulatory txpower limits ... reg_limit values are in half-dBm,
2690 * max_power_avg values are in dBm, convert * 2 */
2691 if (is_fat)
2692 reg_limit = ch_info->fat_max_power_avg * 2;
2693 else
2694 reg_limit = ch_info->max_power_avg * 2;
2695
2696 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
2697 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
2698 if (band)
2699 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
2700 else
2701 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
2702 }
2703
2704 /* Interpolate txpower calibration values for this channel,
2705 * based on factory calibration tests on spaced channels. */
2706 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
2707
2708 /* calculate tx gain adjustment based on power supply voltage */
2709 voltage = priv->eeprom.calib_info.voltage;
2710 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
2711 voltage_compensation =
2712 iwl4965_get_voltage_compensation(voltage, init_voltage);
2713
2714 IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n",
2715 init_voltage,
2716 voltage, voltage_compensation);
2717
2718 /* get current temperature (Celsius) */
2719 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
2720 current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
2721 current_temp = KELVIN_TO_CELSIUS(current_temp);
2722
2723 /* select thermal txpower adjustment params, based on channel group
2724 * (same frequency group used for mimo txatten adjustment) */
2725 degrees_per_05db_num =
2726 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
2727 degrees_per_05db_denom =
2728 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
2729
2730 /* get per-chain txpower values from factory measurements */
2731 for (c = 0; c < 2; c++) {
2732 measurement = &ch_eeprom_info.measurements[c][1];
2733
2734 /* txgain adjustment (in half-dB steps) based on difference
2735 * between factory and current temperature */
2736 factory_temp = measurement->temperature;
2737 iwl4965_math_div_round((current_temp - factory_temp) *
2738 degrees_per_05db_denom,
2739 degrees_per_05db_num,
2740 &temperature_comp[c]);
2741
2742 factory_gain_index[c] = measurement->gain_idx;
2743 factory_actual_pwr[c] = measurement->actual_pow;
2744
2745 IWL_DEBUG_TXPOWER("chain = %d\n", c);
2746 IWL_DEBUG_TXPOWER("fctry tmp %d, "
2747 "curr tmp %d, comp %d steps\n",
2748 factory_temp, current_temp,
2749 temperature_comp[c]);
2750
2751 IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n",
2752 factory_gain_index[c],
2753 factory_actual_pwr[c]);
2754 }
2755
2756 /* for each of 33 bit-rates (including 1 for CCK) */
2757 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
2758 u8 is_mimo_rate;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002759 union iwl4965_tx_power_dual_stream tx_power;
Zhu Yib481de92007-09-25 17:54:57 -07002760
2761 /* for mimo, reduce each chain's txpower by half
2762 * (3dB, 6 steps), so total output power is regulatory
2763 * compliant. */
2764 if (i & 0x8) {
2765 current_regulatory = reg_limit -
2766 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
2767 is_mimo_rate = 1;
2768 } else {
2769 current_regulatory = reg_limit;
2770 is_mimo_rate = 0;
2771 }
2772
2773 /* find txpower limit, either hardware or regulatory */
2774 power_limit = saturation_power - back_off_table[i];
2775 if (power_limit > current_regulatory)
2776 power_limit = current_regulatory;
2777
2778 /* reduce user's txpower request if necessary
2779 * for this rate on this channel */
2780 target_power = user_target_power;
2781 if (target_power > power_limit)
2782 target_power = power_limit;
2783
2784 IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n",
2785 i, saturation_power - back_off_table[i],
2786 current_regulatory, user_target_power,
2787 target_power);
2788
2789 /* for each of 2 Tx chains (radio transmitters) */
2790 for (c = 0; c < 2; c++) {
2791 s32 atten_value;
2792
2793 if (is_mimo_rate)
2794 atten_value =
2795 (s32)le32_to_cpu(priv->card_alive_init.
2796 tx_atten[txatten_grp][c]);
2797 else
2798 atten_value = 0;
2799
2800 /* calculate index; higher index means lower txpower */
2801 power_index = (u8) (factory_gain_index[c] -
2802 (target_power -
2803 factory_actual_pwr[c]) -
2804 temperature_comp[c] -
2805 voltage_compensation +
2806 atten_value);
2807
2808/* IWL_DEBUG_TXPOWER("calculated txpower index %d\n",
2809 power_index); */
2810
2811 if (power_index < get_min_power_index(i, band))
2812 power_index = get_min_power_index(i, band);
2813
2814 /* adjust 5 GHz index to support negative indexes */
2815 if (!band)
2816 power_index += 9;
2817
2818 /* CCK, rate 32, reduce txpower for CCK */
2819 if (i == POWER_TABLE_CCK_ENTRY)
2820 power_index +=
2821 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
2822
2823 /* stay within the table! */
2824 if (power_index > 107) {
2825 IWL_WARNING("txpower index %d > 107\n",
2826 power_index);
2827 power_index = 107;
2828 }
2829 if (power_index < 0) {
2830 IWL_WARNING("txpower index %d < 0\n",
2831 power_index);
2832 power_index = 0;
2833 }
2834
2835 /* fill txpower command for this rate/chain */
2836 tx_power.s.radio_tx_gain[c] =
2837 gain_table[band][power_index].radio;
2838 tx_power.s.dsp_predis_atten[c] =
2839 gain_table[band][power_index].dsp;
2840
2841 IWL_DEBUG_TXPOWER("chain %d mimo %d index %d "
2842 "gain 0x%02x dsp %d\n",
2843 c, atten_value, power_index,
2844 tx_power.s.radio_tx_gain[c],
2845 tx_power.s.dsp_predis_atten[c]);
2846 }/* for each chain */
2847
2848 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
2849
2850 }/* for each rate */
2851
2852 return 0;
2853}
2854
2855/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002856 * iwl4965_hw_reg_send_txpower - Configure the TXPOWER level user limit
Zhu Yib481de92007-09-25 17:54:57 -07002857 *
2858 * Uses the active RXON for channel, band, and characteristics (fat, high)
2859 * The power limit is taken from priv->user_txpower_limit.
2860 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002861int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002862{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002863 struct iwl4965_txpowertable_cmd cmd = { 0 };
Tomas Winkler857485c2008-03-21 13:53:44 -07002864 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07002865 u8 band = 0;
2866 u8 is_fat = 0;
2867 u8 ctrl_chan_high = 0;
2868
2869 if (test_bit(STATUS_SCANNING, &priv->status)) {
2870 /* If this gets hit a lot, switch it to a BUG() and catch
2871 * the stack trace to find out who is calling this during
2872 * a scan. */
2873 IWL_WARNING("TX Power requested while scanning!\n");
2874 return -EAGAIN;
2875 }
2876
Johannes Berg8318d782008-01-24 19:38:38 +01002877 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07002878
2879 is_fat = is_fat_channel(priv->active_rxon.flags);
2880
2881 if (is_fat &&
2882 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2883 ctrl_chan_high = 1;
2884
2885 cmd.band = band;
2886 cmd.channel = priv->active_rxon.channel;
2887
Tomas Winkler857485c2008-03-21 13:53:44 -07002888 ret = iwl4965_fill_txpower_tbl(priv, band,
Zhu Yib481de92007-09-25 17:54:57 -07002889 le16_to_cpu(priv->active_rxon.channel),
2890 is_fat, ctrl_chan_high, &cmd.tx_power);
Tomas Winkler857485c2008-03-21 13:53:44 -07002891 if (ret)
2892 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07002893
Tomas Winkler857485c2008-03-21 13:53:44 -07002894 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
2895
2896out:
2897 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07002898}
2899
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002900int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
Zhu Yib481de92007-09-25 17:54:57 -07002901{
2902 int rc;
2903 u8 band = 0;
2904 u8 is_fat = 0;
2905 u8 ctrl_chan_high = 0;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002906 struct iwl4965_channel_switch_cmd cmd = { 0 };
Assaf Kraussbf85ea42008-03-14 10:38:49 -07002907 const struct iwl_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07002908
Johannes Berg8318d782008-01-24 19:38:38 +01002909 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07002910
Assaf Krauss8622e702008-03-21 13:53:43 -07002911 ch_info = iwl_get_channel_info(priv, priv->band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07002912
2913 is_fat = is_fat_channel(priv->staging_rxon.flags);
2914
2915 if (is_fat &&
2916 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2917 ctrl_chan_high = 1;
2918
2919 cmd.band = band;
2920 cmd.expect_beacon = 0;
2921 cmd.channel = cpu_to_le16(channel);
2922 cmd.rxon_flags = priv->active_rxon.flags;
2923 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
2924 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
2925 if (ch_info)
2926 cmd.expect_beacon = is_channel_radar(ch_info);
2927 else
2928 cmd.expect_beacon = 1;
2929
2930 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
2931 ctrl_chan_high, &cmd.tx_power);
2932 if (rc) {
2933 IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc);
2934 return rc;
2935 }
2936
Tomas Winkler857485c2008-03-21 13:53:44 -07002937 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
Zhu Yib481de92007-09-25 17:54:57 -07002938 return rc;
2939}
2940
2941#define RTS_HCCA_RETRY_LIMIT 3
2942#define RTS_DFAULT_RETRY_LIMIT 60
2943
/**
 * iwl4965_hw_build_tx_cmd_rate - Fill rate and retry fields of a Tx command
 * @priv: driver private data (antenna toggle state is updated here)
 * @cmd: host command whose cmd.tx portion is filled in
 * @ctrl: mac80211 Tx control; supplies the selected rate
 * @hdr: 802.11 header, used to classify the frame type/subtype
 * @sta_id: station table index (not used in this function)
 * @is_hcca: non-zero selects the shorter HCCA RTS retry limit
 *
 * Data frames are handed to uCode rate scaling (TX_CMD_FLG_STA_RATE_MSK);
 * other frames get an explicit rate, and management frames alternate
 * between antenna A and B on successive calls.
 */
void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv,
			      struct iwl_cmd *cmd,
			      struct ieee80211_tx_control *ctrl,
			      struct ieee80211_hdr *hdr, int sta_id,
			      int is_hcca)
{
	struct iwl4965_tx_cmd *tx = &cmd->cmd.tx;
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;
	u16 fc = le16_to_cpu(hdr->frame_control);
	u8 rate_plcp;
	u16 rate_flags = 0;
	/* clamp mac80211's hw_value into the driver's rate table range */
	int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);

	rate_plcp = iwl4965_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
	    RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

	/* CCK rates need the CCK flag in rate_n_flags */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;


	/* Probe responses get few retries; keep RTS retries no larger */
	if (ieee80211_is_probe_response(fc)) {
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	/* explicit driver-configured retry override, if any (-1 = unset) */
	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;


	if (ieee80211_is_data(fc)) {
		/* Data: let uCode rate scaling choose the actual rate */
		tx->initial_rate_index = 0;
		tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		/* Connection-establishment frames: prefer CTS-to-self
		 * over RTS when RTS protection was requested */
		switch (fc & IEEE80211_FCTL_STYPE) {
		case IEEE80211_STYPE_AUTH:
		case IEEE80211_STYPE_DEAUTH:
		case IEEE80211_STYPE_ASSOC_REQ:
		case IEEE80211_STYPE_REASSOC_REQ:
			if (tx->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		/* Alternate between antenna A and B for successive frames */
		if (priv->use_ant_b_for_management_frame) {
			priv->use_ant_b_for_management_frame = 0;
			rate_flags |= RATE_MCS_ANT_B_MSK;
		} else {
			priv->use_ant_b_for_management_frame = 1;
			rate_flags |= RATE_MCS_ANT_A_MSK;
		}
	}

	tx->rts_retry_limit = rts_retry_limit;
	tx->data_retry_limit = data_retry_limit;
	tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
3010
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003011int iwl4965_hw_get_rx_read(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003012{
Tomas Winkler059ff822008-04-14 21:16:14 -07003013 struct iwl4965_shared *s = priv->shared_virt;
3014 return le32_to_cpu(s->rb_closed) & 0xFFF;
Zhu Yib481de92007-09-25 17:54:57 -07003015}
3016
/* Return the cached temperature reading (Kelvin — it is fed through
 * KELVIN_TO_CELSIUS elsewhere in this file). */
int iwl4965_hw_get_temperature(struct iwl_priv *priv)
{
	return priv->temperature;
}
3021
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003022unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003023 struct iwl4965_frame *frame, u8 rate)
Zhu Yib481de92007-09-25 17:54:57 -07003024{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003025 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
Zhu Yib481de92007-09-25 17:54:57 -07003026 unsigned int frame_size;
3027
3028 tx_beacon_cmd = &frame->u.beacon;
3029 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
3030
Tomas Winklera4062b82008-03-11 16:17:16 -07003031 tx_beacon_cmd->tx.sta_id = priv->hw_setting.bcast_sta_id;
Zhu Yib481de92007-09-25 17:54:57 -07003032 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
3033
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003034 frame_size = iwl4965_fill_beacon_frame(priv,
Zhu Yib481de92007-09-25 17:54:57 -07003035 tx_beacon_cmd->frame,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003036 iwl4965_broadcast_addr,
Zhu Yib481de92007-09-25 17:54:57 -07003037 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
3038
3039 BUG_ON(frame_size > MAX_MPDU_SIZE);
3040 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
3041
3042 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
3043 tx_beacon_cmd->tx.rate_n_flags =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003044 iwl4965_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
Zhu Yib481de92007-09-25 17:54:57 -07003045 else
3046 tx_beacon_cmd->tx.rate_n_flags =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003047 iwl4965_hw_set_rate_n_flags(rate, 0);
Zhu Yib481de92007-09-25 17:54:57 -07003048
3049 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
3050 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
3051 return (sizeof(*tx_beacon_cmd) + frame_size);
3052}
3053
/*
 * Tell 4965 where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 *
 * Returns 0 on success, or the error from iwl_grab_nic_access().
 */
int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
{
	int rc;
	unsigned long flags;
	int txq_id = txq->q.id;

	/* Direct register writes require the NIC access grant, taken
	 * under the driver spinlock */
	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	/* Enable DMA channel, using same id as for TFD queue */
	iwl_write_direct32(
		priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
		IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
3088
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003089int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
Zhu Yib481de92007-09-25 17:54:57 -07003090 dma_addr_t addr, u16 len)
3091{
3092 int index, is_odd;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003093 struct iwl4965_tfd_frame *tfd = ptr;
Zhu Yib481de92007-09-25 17:54:57 -07003094 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
3095
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003096 /* Each TFD can point to a maximum 20 Tx buffers */
Zhu Yib481de92007-09-25 17:54:57 -07003097 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
3098 IWL_ERROR("Error can not send more than %d chunks\n",
3099 MAX_NUM_OF_TBS);
3100 return -EINVAL;
3101 }
3102
3103 index = num_tbs / 2;
3104 is_odd = num_tbs & 0x1;
3105
3106 if (!is_odd) {
3107 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
3108 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
Tomas Winkler6a218f62008-01-14 17:46:15 -08003109 iwl_get_dma_hi_address(addr));
Zhu Yib481de92007-09-25 17:54:57 -07003110 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
3111 } else {
3112 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
3113 (u32) (addr & 0xffff));
3114 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
3115 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
3116 }
3117
3118 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
3119
3120 return 0;
3121}
3122
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003123static void iwl4965_hw_card_show_info(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003124{
3125 u16 hw_version = priv->eeprom.board_revision_4965;
3126
3127 IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n",
3128 ((hw_version >> 8) & 0x0F),
3129 ((hw_version >> 8) >> 4), (hw_version & 0x00FF));
3130
3131 IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n",
3132 priv->eeprom.board_pba_number_4965);
3133}
3134
3135#define IWL_TX_CRC_SIZE 4
3136#define IWL_TX_DELIMITER_SIZE 4
3137
/**
 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 * @priv: driver private data (owns the device-shared area)
 * @txq: Tx queue; its write pointer indexes the byte-count table
 * @byte_cnt: frame byte count before CRC/delimiter overhead is added
 *
 * Writes into the uCode-visible shared area (priv->shared_virt).
 */
static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
					    struct iwl4965_tx_queue *txq,
					    u16 byte_cnt)
{
	int len;
	int txq_id = txq->q.id;
	struct iwl4965_shared *shared_data = priv->shared_virt;

	/* account for CRC and delimiter overhead */
	len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;

	/* Set up byte count within first 256 entries */
	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
		       tfd_offset[txq->q.write_ptr], byte_cnt, len);

	/* If within first 64 entries, duplicate at end
	 * (lets uCode read a contiguous window across the wrap point) */
	if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE)
		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
			tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr],
			byte_cnt, len);
}
3161
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003162/**
3163 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
3164 *
3165 * Selects how many and which Rx receivers/antennas/chains to use.
3166 * This should not be used for scan command ... it puts data in wrong place.
3167 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003168void iwl4965_set_rxon_chain(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003169{
3170 u8 is_single = is_single_stream(priv);
3171 u8 idle_state, rx_state;
3172
3173 priv->staging_rxon.rx_chain = 0;
3174 rx_state = idle_state = 3;
3175
3176 /* Tell uCode which antennas are actually connected.
3177 * Before first association, we assume all antennas are connected.
3178 * Just after first association, iwl4965_noise_calibration()
3179 * checks which antennas actually *are* connected. */
3180 priv->staging_rxon.rx_chain |=
3181 cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS);
3182
3183 /* How many receivers should we use? */
3184 iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state);
3185 priv->staging_rxon.rx_chain |=
3186 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS);
3187 priv->staging_rxon.rx_chain |=
3188 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS);
3189
3190 if (!is_single && (rx_state >= 2) &&
3191 !test_bit(STATUS_POWER_PMI, &priv->status))
3192 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
3193 else
3194 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
3195
3196 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
3197}
3198
/**
 * sign_extend - Sign extend a value using specified bit as sign-bit
 *
 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
 *
 * @param oper value to sign extend
 * @param index 0 based bit index (0<=index<32) to sign bit
 */
static s32 sign_extend(u32 oper, int index)
{
	u8 shift = 31 - index;

	/* Shift the sign bit up to bit 31, then shift back down as a
	 * signed value so the sign propagates.
	 * NOTE(review): right-shifting a negative value is
	 * implementation-defined in C; this relies on the arithmetic
	 * shift all kernel-supported compilers provide. */
	return (s32)(oper << shift) >> shift;
}
3214
/**
 * iwl4965_get_temperature - return the calibrated temperature (in Kelvin)
 * @priv: provides the statistics/ALIVE data used for calibration
 *
 * Uses calibration constants R1-R4 from the "initialize" ALIVE response
 * (index 1 = FAT mode, index 0 = normal) and the raw temperature reading
 * from statistics (or R4 as a fallback before any statistics arrive).
 *
 * A return of <0 indicates bogus data in the statistics
 */
int iwl4965_get_temperature(const struct iwl_priv *priv)
{
	s32 temperature;
	s32 vt;
	s32 R1, R2, R3;
	u32 R4;

	/* Pick FAT (40 MHz) or normal calibration constants */
	if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
	    (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
		IWL_DEBUG_TEMP("Running FAT temperature calibration\n");
		R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
		R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
		R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
		R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
	} else {
		IWL_DEBUG_TEMP("Running temperature calibration\n");
		R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
		R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
		R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
		R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
	}

	/*
	 * Temperature is only 23 bits, so sign extend out to 32.
	 *
	 * NOTE If we haven't received a statistics notification yet
	 * with an updated temperature, use R4 provided to us in the
	 * "initialize" ALIVE response.
	 */
	if (!test_bit(STATUS_TEMPERATURE, &priv->status))
		vt = sign_extend(R4, 23);
	else
		vt = sign_extend(
			le32_to_cpu(priv->statistics.general.temperature), 23);

	IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n",
		       R1, R2, R3, vt);

	/* R3 == R1 would divide by zero below */
	if (R3 == R1) {
		IWL_ERROR("Calibration conflict R1 == R3\n");
		return -1;
	}

	/* Calculate temperature in degrees Kelvin, adjust by 97%.
	 * Add offset to center the adjustment around 0 degrees Centigrade. */
	temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
	temperature /= (R3 - R1);
	temperature = (temperature * 97) / 100 +
	    TEMPERATURE_CALIB_KELVIN_OFFSET;

	IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
		       KELVIN_TO_CELSIUS(temperature));

	return temperature;
}
3276
3277/* Adjust Txpower only if temperature variance is greater than threshold. */
3278#define IWL_TEMPERATURE_THRESHOLD 3
3279
3280/**
3281 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
3282 *
 * If the temperature has changed sufficiently, then a recalibration
3284 * is needed.
3285 *
3286 * Assumes caller will replace priv->last_temperature once calibration
3287 * executed.
3288 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003289static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003290{
3291 int temp_diff;
3292
3293 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
3294 IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n");
3295 return 0;
3296 }
3297
3298 temp_diff = priv->temperature - priv->last_temperature;
3299
3300 /* get absolute value */
3301 if (temp_diff < 0) {
3302 IWL_DEBUG_POWER("Getting cooler, delta %d, \n", temp_diff);
3303 temp_diff = -temp_diff;
3304 } else if (temp_diff == 0)
3305 IWL_DEBUG_POWER("Same temp, \n");
3306 else
3307 IWL_DEBUG_POWER("Getting warmer, delta %d, \n", temp_diff);
3308
3309 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
3310 IWL_DEBUG_POWER("Thermal txpower calib not needed\n");
3311 return 0;
3312 }
3313
3314 IWL_DEBUG_POWER("Thermal txpower calib needed\n");
3315
3316 return 1;
3317}
3318
3319/* Calculate noise level, based on measurements during network silence just
3320 * before arriving beacon. This measurement can be done only if we know
3321 * exactly when to expect beacons, therefore only when we're associated. */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003322static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003323{
3324 struct statistics_rx_non_phy *rx_info
3325 = &(priv->statistics.rx.general);
3326 int num_active_rx = 0;
3327 int total_silence = 0;
3328 int bcn_silence_a =
3329 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
3330 int bcn_silence_b =
3331 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
3332 int bcn_silence_c =
3333 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
3334
3335 if (bcn_silence_a) {
3336 total_silence += bcn_silence_a;
3337 num_active_rx++;
3338 }
3339 if (bcn_silence_b) {
3340 total_silence += bcn_silence_b;
3341 num_active_rx++;
3342 }
3343 if (bcn_silence_c) {
3344 total_silence += bcn_silence_c;
3345 num_active_rx++;
3346 }
3347
3348 /* Average among active antennas */
3349 if (num_active_rx)
3350 priv->last_rx_noise = (total_silence / num_active_rx) - 107;
3351 else
3352 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3353
3354 IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
3355 bcn_silence_a, bcn_silence_b, bcn_silence_c,
3356 priv->last_rx_noise);
3357}
3358
/**
 * iwl4965_hw_rx_statistics - handle a statistics notification from uCode
 *
 * Copies the packet's statistics block into priv->statistics, re-arms the
 * periodic statistics timer, recomputes beacon noise (and queues the
 * sensitivity work, if built in) when not scanning, and — if the reported
 * temperature or FAT mode changed — recomputes the calibrated temperature
 * and queues txpower recalibration work when the drift is large enough.
 */
void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
{
	struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
	int change;
	s32 temp;

	IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n",
		     (int)sizeof(priv->statistics), pkt->len);

	/* Did the raw temperature or the FAT (40 MHz) mode flag change
	 * relative to the statistics snapshot we already hold? */
	change = ((priv->statistics.general.temperature !=
		   pkt->u.stats.general.temperature) ||
		  ((priv->statistics.flag &
		    STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
		   (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)));

	memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * REG_RECALIB_PERIOD seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));

	/* Noise from beacon silence is only meaningful while associated
	 * on one channel, i.e. not while scanning */
	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl4965_rx_calc_noise(priv);
#ifdef CONFIG_IWL4965_SENSITIVITY
		queue_work(priv->workqueue, &priv->sensitivity_work);
#endif
	}

	iwl_leds_background(priv);

	/* If the hardware hasn't reported a change in
	 * temperature then don't bother computing a
	 * calibrated temperature value */
	if (!change)
		return;

	/* Negative return indicates calibration failure; keep old value */
	temp = iwl4965_get_temperature(priv);
	if (temp < 0)
		return;

	if (priv->temperature != temp) {
		if (priv->temperature)
			IWL_DEBUG_TEMP("Temperature changed "
				       "from %dC to %dC\n",
				       KELVIN_TO_CELSIUS(priv->temperature),
				       KELVIN_TO_CELSIUS(temp));
		else
			IWL_DEBUG_TEMP("Temperature "
				       "initialized to %dC\n",
				       KELVIN_TO_CELSIUS(temp));
	}

	priv->temperature = temp;
	set_bit(STATUS_TEMPERATURE, &priv->status);

	/* Trigger txpower recalibration only when not scanning and the
	 * temperature drift exceeds the threshold */
	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    iwl4965_is_temp_calib_needed(priv))
		queue_work(priv->workqueue, &priv->txpower_work);
}
3424
/**
 * iwl4965_add_radiotap - prepend a radiotap header to a received frame
 *
 * Pushes a fixed-layout radiotap header (TSF, flags, rate, channel,
 * dBm signal/noise, antenna) in front of the 802.11 frame in @skb and
 * sets RX_FLAG_RADIOTAP in @stats. Bails out (frame delivered without
 * radiotap) if the skb lacks headroom for the header.
 *
 * NOTE: rt_dbmnoise is hard-coded to 0 here (local `noise` is never
 * assigned a real measurement).
 */
static void iwl4965_add_radiotap(struct iwl_priv *priv,
				 struct sk_buff *skb,
				 struct iwl4965_rx_phy_res *rx_start,
				 struct ieee80211_rx_status *stats,
				 u32 ampdu_status)
{
	s8 signal = stats->ssi;
	s8 noise = 0;
	int rate = stats->rate_idx;
	u64 tsf = stats->mactime;
	__le16 antenna;
	__le16 phy_flags_hw = rx_start->phy_flags;
	/* On-air layout of the radiotap header we emit; must stay packed */
	struct iwl4965_rt_rx_hdr {
		struct ieee80211_radiotap_header rt_hdr;
		__le64 rt_tsf;		/* TSF */
		u8 rt_flags;		/* radiotap packet flags */
		u8 rt_rate;		/* rate in 500kb/s */
		__le16 rt_channelMHz;	/* channel in MHz */
		__le16 rt_chbitmask;	/* channel bitfield */
		s8 rt_dbmsignal;	/* signal in dBm, kluged to signed */
		s8 rt_dbmnoise;
		u8 rt_antenna;		/* antenna number */
	} __attribute__ ((packed)) *iwl4965_rt;

	/* TODO: We won't have enough headroom for HT frames. Fix it later. */
	if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
		if (net_ratelimit())
			printk(KERN_ERR "not enough headroom [%d] for "
			       "radiotap head [%zd]\n",
			       skb_headroom(skb), sizeof(*iwl4965_rt));
		return;
	}

	/* put radiotap header in front of 802.11 header and data */
	iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));

	/* initialise radiotap header */
	iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
	iwl4965_rt->rt_hdr.it_pad = 0;

	/* total header + data */
	put_unaligned(cpu_to_le16(sizeof(*iwl4965_rt)),
		      &iwl4965_rt->rt_hdr.it_len);

	/* Indicate all the fields we add to the radiotap header */
	put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
				  (1 << IEEE80211_RADIOTAP_FLAGS) |
				  (1 << IEEE80211_RADIOTAP_RATE) |
				  (1 << IEEE80211_RADIOTAP_CHANNEL) |
				  (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
				  (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
				  (1 << IEEE80211_RADIOTAP_ANTENNA)),
		      &iwl4965_rt->rt_hdr.it_present);

	/* Zero the flags, we'll add to them as we go */
	iwl4965_rt->rt_flags = 0;

	put_unaligned(cpu_to_le64(tsf), &iwl4965_rt->rt_tsf);

	iwl4965_rt->rt_dbmsignal = signal;
	iwl4965_rt->rt_dbmnoise = noise;

	/* Convert the channel frequency and set the flags */
	put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
	if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
		put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
					  IEEE80211_CHAN_5GHZ),
			      &iwl4965_rt->rt_chbitmask);
	else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
		put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK |
					  IEEE80211_CHAN_2GHZ),
			      &iwl4965_rt->rt_chbitmask);
	else	/* 802.11g */
		put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
					  IEEE80211_CHAN_2GHZ),
			      &iwl4965_rt->rt_chbitmask);

	/* rate_idx of -1 means "no rate mapping"; report rate 0 */
	if (rate == -1)
		iwl4965_rt->rt_rate = 0;
	else
		iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee;

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bitfield. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favour of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
	iwl4965_rt->rt_antenna = le16_to_cpu(antenna) >> 4;

	/* set the preamble flag if appropriate */
	if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;

	stats->flag |= RX_FLAG_RADIOTAP;
}
3529
Tomas Winkler19758be2008-03-12 16:58:51 -07003530static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
3531{
3532 /* 0 - mgmt, 1 - cnt, 2 - data */
3533 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
3534 priv->rx_stats[idx].cnt++;
3535 priv->rx_stats[idx].bytes += len;
3536}
3537
Emmanuel Grumbach17e476b2008-03-19 16:41:42 -07003538static u32 iwl4965_translate_rx_status(u32 decrypt_in)
3539{
3540 u32 decrypt_out = 0;
3541
3542 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
3543 RX_RES_STATUS_STATION_FOUND)
3544 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
3545 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
3546
3547 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
3548
3549 /* packet was not encrypted */
3550 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
3551 RX_RES_STATUS_SEC_TYPE_NONE)
3552 return decrypt_out;
3553
3554 /* packet was encrypted with unknown alg */
3555 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
3556 RX_RES_STATUS_SEC_TYPE_ERR)
3557 return decrypt_out;
3558
3559 /* decryption was not done in HW */
3560 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
3561 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
3562 return decrypt_out;
3563
3564 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
3565
3566 case RX_RES_STATUS_SEC_TYPE_CCMP:
3567 /* alg is CCM: check MIC only */
3568 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
3569 /* Bad MIC */
3570 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
3571 else
3572 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
3573
3574 break;
3575
3576 case RX_RES_STATUS_SEC_TYPE_TKIP:
3577 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
3578 /* Bad TTAK */
3579 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
3580 break;
3581 }
3582 /* fall through if TTAK OK */
3583 default:
3584 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
3585 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
3586 else
3587 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
3588 break;
3589 };
3590
3591 IWL_DEBUG_RX("decrypt_in:0x%x decrypt_out = 0x%x\n",
3592 decrypt_in, decrypt_out);
3593
3594 return decrypt_out;
3595}
3596
/**
 * iwl4965_handle_data_packet - deliver one received frame to mac80211
 *
 * Locates the 802.11 header and payload inside the rx buffer (layout
 * differs for legacy frames, which carry their own PHY result, vs. MPDU
 * frames, which use the PHY result cached in priv->last_phy_res),
 * validates the byte count, optionally decorates the skb with decryption
 * status and a radiotap header, then hands the skb to mac80211 via
 * ieee80211_rx_irqsafe() and relinquishes ownership (rxb->skb = NULL).
 *
 * NOTE(review): the @is_data parameter is not referenced anywhere in this
 * body — callers pass it but it is effectively unused here.
 */
static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
				       int include_phy,
				       struct iwl4965_rx_mem_buffer *rxb,
				       struct ieee80211_rx_status *stats)
{
	struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
	struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
	    (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
	struct ieee80211_hdr *hdr;
	u16 len;
	__le32 *rx_end;
	unsigned int skblen;
	u32 ampdu_status;
	u32 ampdu_status_legacy;

	/* MPDU frames rely on the PHY result cached by a preceding
	 * REPLY_RX_PHY_CMD notification */
	if (!include_phy && priv->last_phy_res[0])
		rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];

	if (!rx_start) {
		IWL_ERROR("MPDU frame without a PHY data\n");
		return;
	}
	if (include_phy) {
		/* Legacy frame: header follows the in-packet PHY result */
		hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] +
					       rx_start->cfg_phy_cnt);

		len = le16_to_cpu(rx_start->byte_count);

		rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] +
				  sizeof(struct iwl4965_rx_phy_res) +
				  rx_start->cfg_phy_cnt + len);

	} else {
		/* MPDU frame: header follows the mpdu_res_start block;
		 * keep the cached PHY result's byte count in sync */
		struct iwl4965_rx_mpdu_res_start *amsdu =
		    (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;

		hdr = (struct ieee80211_hdr *)(pkt->u.raw +
			sizeof(struct iwl4965_rx_mpdu_res_start));
		len = le16_to_cpu(amsdu->byte_count);
		rx_start->byte_count = amsdu->byte_count;
		rx_end = (__le32 *) (((u8 *) hdr) + len);
	}
	if (len > priv->hw_setting.max_pkt_size || len < 16) {
		IWL_WARNING("byte count out of range [16,4K] : %d\n", len);
		return;
	}

	/* Status word lives in the 32 bits just past the frame data */
	ampdu_status = le32_to_cpu(*rx_end);
	skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32);

	if (!include_phy) {
		/* New status scheme, need to translate */
		ampdu_status_legacy = ampdu_status;
		ampdu_status = iwl4965_translate_rx_status(ampdu_status);
	}

	/* start from MAC */
	skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
	skb_put(rxb->skb, len);	/* end where data ends */

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT
		    ("Dropping packet while interface is not open.\n");
		return;
	}

	stats->flag = 0;
	/* hdr now points into the trimmed skb */
	hdr = (struct ieee80211_hdr *)rxb->skb->data;

	if (priv->cfg->mod_params->hw_crypto)
		iwl4965_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats);

	if (priv->add_radiotap)
		iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);

	iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len);
	/* mac80211 takes ownership of the skb from here on */
	ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
	priv->alloc_rxb_skb--;
	rxb->skb = NULL;
}
3678
3679/* Calc max signal level (dBm) among 3 possible receivers */
3680static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp)
3681{
3682 /* data from PHY/DSP regarding signal strength, etc.,
3683 * contents are always there, not configurable by host. */
3684 struct iwl4965_rx_non_cfg_phy *ncphy =
3685 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
3686 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
3687 >> IWL_AGC_DB_POS;
3688
3689 u32 valid_antennae =
3690 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
3691 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
3692 u8 max_rssi = 0;
3693 u32 i;
3694
3695 /* Find max rssi among 3 possible receivers.
3696 * These values are measured by the digital signal processor (DSP).
3697 * They should stay fairly constant even as the signal strength varies,
3698 * if the radio's automatic gain control (AGC) is working right.
3699 * AGC value (see below) will provide the "interesting" info. */
3700 for (i = 0; i < 3; i++)
3701 if (valid_antennae & (1 << i))
3702 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
3703
3704 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
3705 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
3706 max_rssi, agc);
3707
3708 /* dBm = max_rssi dB - agc dB - constant.
3709 * Higher AGC (higher radio gain) means lower signal. */
3710 return (max_rssi - agc - IWL_RSSI_OFFSET);
3711}
3712
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003713#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07003714
Assaf Krauss1ea87392008-03-18 14:57:50 -07003715void iwl4965_init_ht_hw_capab(struct iwl_priv *priv,
3716 struct ieee80211_ht_info *ht_info,
Tomas Winkler78330fd2008-02-06 02:37:18 +02003717 enum ieee80211_band band)
Ron Rindjunsky326eeee2007-11-26 16:14:37 +02003718{
3719 ht_info->cap = 0;
3720 memset(ht_info->supp_mcs_set, 0, 16);
3721
3722 ht_info->ht_supported = 1;
3723
Tomas Winkler78330fd2008-02-06 02:37:18 +02003724 if (band == IEEE80211_BAND_5GHZ) {
Ron Rindjunsky326eeee2007-11-26 16:14:37 +02003725 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
3726 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
3727 ht_info->supp_mcs_set[4] = 0x01;
3728 }
3729 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
3730 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
3731 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
3732 (IWL_MIMO_PS_NONE << 2));
Assaf Krauss1ea87392008-03-18 14:57:50 -07003733
3734 if (priv->cfg->mod_params->amsdu_size_8K)
Ron Rindjunsky9ee1ba42007-11-26 16:14:42 +02003735 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
Ron Rindjunsky326eeee2007-11-26 16:14:37 +02003736
3737 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3738 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3739
3740 ht_info->supp_mcs_set[0] = 0xFF;
3741 ht_info->supp_mcs_set[1] = 0xFF;
3742}
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003743#endif /* CONFIG_IWL4965_HT */
Zhu Yib481de92007-09-25 17:54:57 -07003744
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003745static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
Zhu Yib481de92007-09-25 17:54:57 -07003746{
3747 unsigned long flags;
3748
3749 spin_lock_irqsave(&priv->sta_lock, flags);
3750 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
3751 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3752 priv->stations[sta_id].sta.sta.modify_mask = 0;
3753 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3754 spin_unlock_irqrestore(&priv->sta_lock, flags);
3755
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003756 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -07003757}
3758
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003759static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
Zhu Yib481de92007-09-25 17:54:57 -07003760{
3761 /* FIXME: need locking over ps_status ??? */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003762 u8 sta_id = iwl4965_hw_find_station(priv, addr);
Zhu Yib481de92007-09-25 17:54:57 -07003763
3764 if (sta_id != IWL_INVALID_STATION) {
3765 u8 sta_awake = priv->stations[sta_id].
3766 ps_status == STA_PS_STATUS_WAKE;
3767
3768 if (sta_awake && ps_bit)
3769 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
3770 else if (!sta_awake && !ps_bit) {
3771 iwl4965_sta_modify_ps_wake(priv, sta_id);
3772 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
3773 }
3774 }
3775}
Tomas Winkler0a6857e2008-03-12 16:58:49 -07003776#ifdef CONFIG_IWLWIFI_DEBUG
Tomas Winkler17744ff2008-03-02 01:52:00 +02003777
3778/**
3779 * iwl4965_dbg_report_frame - dump frame to syslog during debug sessions
3780 *
3781 * You may hack this function to show different aspects of received frames,
3782 * including selective frame dumps.
3783 * group100 parameter selects whether to show 1 out of 100 good frames.
3784 *
3785 * TODO: This was originally written for 3945, need to audit for
3786 * proper operation with 4965.
3787 */
static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
				      struct iwl4965_rx_packet *pkt,
				      struct ieee80211_hdr *header, int group100)
{
	u32 to_us;
	u32 print_summary = 0;
	u32 print_dump = 0;	/* set to 1 to dump all frames' contents */
	u32 hundred = 0;
	u32 dataframe = 0;
	u16 fc;
	u16 seq_ctl;
	u16 channel;
	u16 phy_flags;
	int rate_sym;
	u16 length;
	u16 status;
	u16 bcn_tmr;
	u32 tsf_low;
	u64 tsf;
	u8 rssi;
	u8 agc;
	u16 sig_avg;
	u16 noise_diff;
	struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
	struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
	struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
	u8 *data = IWL_RX_DATA(pkt);

	/* Entire function is a no-op unless RX debugging is enabled */
	if (likely(!(iwl_debug_level & IWL_DL_RX)))
		return;

	/* MAC header */
	fc = le16_to_cpu(header->frame_control);
	seq_ctl = le16_to_cpu(header->seq_ctrl);

	/* metadata */
	channel = le16_to_cpu(rx_hdr->channel);
	phy_flags = le16_to_cpu(rx_hdr->phy_flags);
	rate_sym = rx_hdr->rate;
	length = le16_to_cpu(rx_hdr->len);

	/* end-of-frame status and timestamp */
	status = le32_to_cpu(rx_end->status);
	bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
	tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
	tsf = le64_to_cpu(rx_end->timestamp);

	/* signal statistics */
	rssi = rx_stats->rssi;
	agc = rx_stats->agc;
	sig_avg = le16_to_cpu(rx_stats->sig_avg);
	noise_diff = le16_to_cpu(rx_stats->noise_diff);

	to_us = !compare_ether_addr(header->addr1, priv->mac_addr);

	/* if data frame is to us and all is good,
	 *   (optionally) print summary for only 1 out of every 100 */
	if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
	    (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
		dataframe = 1;
		if (!group100)
			print_summary = 1;	/* print each frame */
		else if (priv->framecnt_to_us < 100) {
			priv->framecnt_to_us++;
			print_summary = 0;
		} else {
			priv->framecnt_to_us = 0;
			print_summary = 1;
			hundred = 1;
		}
	} else {
		/* print summary for all other frames */
		print_summary = 1;
	}

	if (print_summary) {
		char *title;
		int rate_idx;
		u32 bitrate;

		/* Pick a short label describing the frame type; probe
		 * responses and beacons also get a full hex dump below */
		if (hundred)
			title = "100Frames";
		else if (fc & IEEE80211_FCTL_RETRY)
			title = "Retry";
		else if (ieee80211_is_assoc_response(fc))
			title = "AscRsp";
		else if (ieee80211_is_reassoc_response(fc))
			title = "RasRsp";
		else if (ieee80211_is_probe_response(fc)) {
			title = "PrbRsp";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_beacon(fc)) {
			title = "Beacon";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_atim(fc))
			title = "ATIM";
		else if (ieee80211_is_auth(fc))
			title = "Auth";
		else if (ieee80211_is_deauth(fc))
			title = "DeAuth";
		else if (ieee80211_is_disassoc(fc))
			title = "DisAssoc";
		else
			title = "Frame";

		/* Map hardware rate symbol to a printable bitrate */
		rate_idx = iwl4965_hwrate_to_plcp_idx(rate_sym);
		if (unlikely(rate_idx == -1))
			bitrate = 0;
		else
			bitrate = iwl4965_rates[rate_idx].ieee / 2;

		/* print frame summary.
		 * MAC addresses show just the last byte (for brevity),
		 *    but you can hack it to show more, if you'd like to. */
		if (dataframe)
			IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
				     "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
				     title, fc, header->addr1[5],
				     length, rssi, channel, bitrate);
		else {
			/* src/dst addresses assume managed mode */
			IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
				     "src=0x%02x, rssi=%u, tim=%lu usec, "
				     "phy=0x%02x, chnl=%d\n",
				     title, fc, header->addr1[5],
				     header->addr3[5], rssi,
				     tsf_low - priv->scan_start_tsf,
				     phy_flags, channel);
		}
	}
	if (print_dump)
		iwl_print_hex_dump(IWL_DL_RX, data, length);
}
3921#else
/* Stub: frame reporting compiles away without CONFIG_IWLWIFI_DEBUG. */
static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv,
					    struct iwl4965_rx_packet *pkt,
					    struct ieee80211_hdr *header,
					    int group100)
{
}
3928#endif
3929
Zhu Yib481de92007-09-25 17:54:57 -07003930
Mohamed Abbas7878a5a2007-11-29 11:10:13 +08003931
Tomas Winkler857485c2008-03-21 13:53:44 -07003932/* Called for REPLY_RX (legacy ABG frames), or
Zhu Yib481de92007-09-25 17:54:57 -07003933 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003934static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003935 struct iwl4965_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003936{
Tomas Winkler17744ff2008-03-02 01:52:00 +02003937 struct ieee80211_hdr *header;
3938 struct ieee80211_rx_status rx_status;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003939 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07003940 /* Use phy data (Rx signal strength, etc.) contained within
3941 * this rx packet for legacy frames,
3942 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
Tomas Winkler857485c2008-03-21 13:53:44 -07003943 int include_phy = (pkt->hdr.cmd == REPLY_RX);
Zhu Yib481de92007-09-25 17:54:57 -07003944 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3945 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
3946 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3947 __le32 *rx_end;
3948 unsigned int len = 0;
Zhu Yib481de92007-09-25 17:54:57 -07003949 u16 fc;
Zhu Yib481de92007-09-25 17:54:57 -07003950 u8 network_packet;
3951
Tomas Winkler17744ff2008-03-02 01:52:00 +02003952 rx_status.mactime = le64_to_cpu(rx_start->timestamp);
Tomas Winklerdc92e492008-04-03 16:05:22 -07003953 rx_status.freq =
3954 ieee80211_frequency_to_channel(le16_to_cpu(rx_start->channel));
Tomas Winkler17744ff2008-03-02 01:52:00 +02003955 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
3956 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
Tomas Winklerdc92e492008-04-03 16:05:22 -07003957 rx_status.rate_idx =
3958 iwl4965_hwrate_to_plcp_idx(le32_to_cpu(rx_start->rate_n_flags));
Tomas Winkler17744ff2008-03-02 01:52:00 +02003959 if (rx_status.band == IEEE80211_BAND_5GHZ)
3960 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
3961
3962 rx_status.antenna = 0;
3963 rx_status.flag = 0;
3964
Zhu Yib481de92007-09-25 17:54:57 -07003965 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
Tomas Winklerdc92e492008-04-03 16:05:22 -07003966 IWL_DEBUG_DROP("dsp size out of range [0,20]: %d/n",
3967 rx_start->cfg_phy_cnt);
Zhu Yib481de92007-09-25 17:54:57 -07003968 return;
3969 }
Tomas Winkler17744ff2008-03-02 01:52:00 +02003970
Zhu Yib481de92007-09-25 17:54:57 -07003971 if (!include_phy) {
3972 if (priv->last_phy_res[0])
3973 rx_start = (struct iwl4965_rx_phy_res *)
3974 &priv->last_phy_res[1];
3975 else
3976 rx_start = NULL;
3977 }
3978
3979 if (!rx_start) {
3980 IWL_ERROR("MPDU frame without a PHY data\n");
3981 return;
3982 }
3983
3984 if (include_phy) {
3985 header = (struct ieee80211_hdr *)((u8 *) & rx_start[1]
3986 + rx_start->cfg_phy_cnt);
3987
3988 len = le16_to_cpu(rx_start->byte_count);
Tomas Winkler17744ff2008-03-02 01:52:00 +02003989 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
Zhu Yib481de92007-09-25 17:54:57 -07003990 sizeof(struct iwl4965_rx_phy_res) + len);
3991 } else {
3992 struct iwl4965_rx_mpdu_res_start *amsdu =
3993 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3994
3995 header = (void *)(pkt->u.raw +
3996 sizeof(struct iwl4965_rx_mpdu_res_start));
3997 len = le16_to_cpu(amsdu->byte_count);
3998 rx_end = (__le32 *) (pkt->u.raw +
3999 sizeof(struct iwl4965_rx_mpdu_res_start) + len);
4000 }
4001
4002 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
4003 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
4004 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n",
4005 le32_to_cpu(*rx_end));
4006 return;
4007 }
4008
4009 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
4010
Zhu Yib481de92007-09-25 17:54:57 -07004011 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
Tomas Winkler17744ff2008-03-02 01:52:00 +02004012 rx_status.ssi = iwl4965_calc_rssi(rx_start);
Zhu Yib481de92007-09-25 17:54:57 -07004013
4014 /* Meaningful noise values are available only from beacon statistics,
4015 * which are gathered only when associated, and indicate noise
4016 * only for the associated network channel ...
4017 * Ignore these noise values while scanning (other channels) */
Tomas Winkler3109ece2008-03-28 16:33:35 -07004018 if (iwl_is_associated(priv) &&
Zhu Yib481de92007-09-25 17:54:57 -07004019 !test_bit(STATUS_SCANNING, &priv->status)) {
Tomas Winkler17744ff2008-03-02 01:52:00 +02004020 rx_status.noise = priv->last_rx_noise;
4021 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi,
4022 rx_status.noise);
Zhu Yib481de92007-09-25 17:54:57 -07004023 } else {
Tomas Winkler17744ff2008-03-02 01:52:00 +02004024 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
4025 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 0);
Zhu Yib481de92007-09-25 17:54:57 -07004026 }
4027
4028 /* Reset beacon noise level if not associated. */
Tomas Winkler3109ece2008-03-28 16:33:35 -07004029 if (!iwl_is_associated(priv))
Zhu Yib481de92007-09-25 17:54:57 -07004030 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
4031
Tomas Winkler17744ff2008-03-02 01:52:00 +02004032 /* Set "1" to report good data frames in groups of 100 */
4033 /* FIXME: need to optimze the call: */
4034 iwl4965_dbg_report_frame(priv, pkt, header, 1);
Zhu Yib481de92007-09-25 17:54:57 -07004035
Tomas Winkler17744ff2008-03-02 01:52:00 +02004036 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
4037 rx_status.ssi, rx_status.noise, rx_status.signal,
John W. Linville06501d22008-04-01 17:38:47 -04004038 (unsigned long long)rx_status.mactime);
Zhu Yib481de92007-09-25 17:54:57 -07004039
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004040 network_packet = iwl4965_is_network_packet(priv, header);
Zhu Yib481de92007-09-25 17:54:57 -07004041 if (network_packet) {
Tomas Winkler17744ff2008-03-02 01:52:00 +02004042 priv->last_rx_rssi = rx_status.ssi;
Zhu Yib481de92007-09-25 17:54:57 -07004043 priv->last_beacon_time = priv->ucode_beacon_time;
4044 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
4045 }
4046
4047 fc = le16_to_cpu(header->frame_control);
4048 switch (fc & IEEE80211_FCTL_FTYPE) {
4049 case IEEE80211_FTYPE_MGMT:
Zhu Yib481de92007-09-25 17:54:57 -07004050 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
4051 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
4052 header->addr2);
Tomas Winkler17744ff2008-03-02 01:52:00 +02004053 iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &rx_status);
Zhu Yib481de92007-09-25 17:54:57 -07004054 break;
4055
4056 case IEEE80211_FTYPE_CTL:
Ron Rindjunsky9ab46172007-12-25 17:00:38 +02004057#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07004058 switch (fc & IEEE80211_FCTL_STYPE) {
4059 case IEEE80211_STYPE_BACK_REQ:
4060 IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n");
4061 iwl4965_handle_data_packet(priv, 0, include_phy,
Tomas Winkler17744ff2008-03-02 01:52:00 +02004062 rxb, &rx_status);
Zhu Yib481de92007-09-25 17:54:57 -07004063 break;
4064 default:
4065 break;
4066 }
4067#endif
Zhu Yib481de92007-09-25 17:54:57 -07004068 break;
4069
Joe Perches0795af52007-10-03 17:59:30 -07004070 case IEEE80211_FTYPE_DATA: {
4071 DECLARE_MAC_BUF(mac1);
4072 DECLARE_MAC_BUF(mac2);
4073 DECLARE_MAC_BUF(mac3);
4074
Zhu Yib481de92007-09-25 17:54:57 -07004075 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
4076 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
4077 header->addr2);
4078
4079 if (unlikely(!network_packet))
4080 IWL_DEBUG_DROP("Dropping (non network): "
Joe Perches0795af52007-10-03 17:59:30 -07004081 "%s, %s, %s\n",
4082 print_mac(mac1, header->addr1),
4083 print_mac(mac2, header->addr2),
4084 print_mac(mac3, header->addr3));
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004085 else if (unlikely(iwl4965_is_duplicate_packet(priv, header)))
Joe Perches0795af52007-10-03 17:59:30 -07004086 IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n",
4087 print_mac(mac1, header->addr1),
4088 print_mac(mac2, header->addr2),
4089 print_mac(mac3, header->addr3));
Zhu Yib481de92007-09-25 17:54:57 -07004090 else
4091 iwl4965_handle_data_packet(priv, 1, include_phy, rxb,
Tomas Winkler17744ff2008-03-02 01:52:00 +02004092 &rx_status);
Zhu Yib481de92007-09-25 17:54:57 -07004093 break;
Joe Perches0795af52007-10-03 17:59:30 -07004094 }
Zhu Yib481de92007-09-25 17:54:57 -07004095 default:
4096 break;
4097
4098 }
4099}
4100
4101/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
4102 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004103static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004104 struct iwl4965_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07004105{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004106 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07004107 priv->last_phy_res[0] = 1;
4108 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
4109 sizeof(struct iwl4965_rx_phy_res));
4110}
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004111static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004112 struct iwl4965_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07004113
4114{
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004115#ifdef CONFIG_IWL4965_SENSITIVITY
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004116 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4117 struct iwl4965_missed_beacon_notif *missed_beacon;
Zhu Yib481de92007-09-25 17:54:57 -07004118
4119 missed_beacon = &pkt->u.missed_beacon;
4120 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
4121 IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
4122 le32_to_cpu(missed_beacon->consequtive_missed_beacons),
4123 le32_to_cpu(missed_beacon->total_missed_becons),
4124 le32_to_cpu(missed_beacon->num_recvd_beacons),
4125 le32_to_cpu(missed_beacon->num_expected_beacons));
4126 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
4127 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)))
4128 queue_work(priv->workqueue, &priv->sensitivity_work);
4129 }
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004130#endif /*CONFIG_IWL4965_SENSITIVITY*/
Zhu Yib481de92007-09-25 17:54:57 -07004131}
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004132#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07004133
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004134/**
4135 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
4136 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004137static void iwl4965_sta_modify_enable_tid_tx(struct iwl_priv *priv,
Zhu Yib481de92007-09-25 17:54:57 -07004138 int sta_id, int tid)
4139{
4140 unsigned long flags;
4141
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004142 /* Remove "disable" flag, to enable Tx for this TID */
Zhu Yib481de92007-09-25 17:54:57 -07004143 spin_lock_irqsave(&priv->sta_lock, flags);
4144 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
4145 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
4146 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4147 spin_unlock_irqrestore(&priv->sta_lock, flags);
4148
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004149 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -07004150}
4151
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004152/**
4153 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
4154 *
4155 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
4156 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
4157 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004158static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004159 struct iwl4965_ht_agg *agg,
4160 struct iwl4965_compressed_ba_resp*
Zhu Yib481de92007-09-25 17:54:57 -07004161 ba_resp)
4162
4163{
4164 int i, sh, ack;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004165 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
4166 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4167 u64 bitmap;
4168 int successes = 0;
4169 struct ieee80211_tx_status *tx_status;
Zhu Yib481de92007-09-25 17:54:57 -07004170
4171 if (unlikely(!agg->wait_for_ba)) {
4172 IWL_ERROR("Received BA when not expected\n");
4173 return -EINVAL;
4174 }
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004175
4176 /* Mark that the expected block-ack response arrived */
Zhu Yib481de92007-09-25 17:54:57 -07004177 agg->wait_for_ba = 0;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004178 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004179
4180 /* Calculate shift to align block-ack bits with our Tx window bits */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004181 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
Ian Schram01ebd062007-10-25 17:15:22 +08004182 if (sh < 0) /* tbw something is wrong with indices */
Zhu Yib481de92007-09-25 17:54:57 -07004183 sh += 0x100;
4184
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004185 /* don't use 64-bit values for now */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004186 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
Zhu Yib481de92007-09-25 17:54:57 -07004187
4188 if (agg->frame_count > (64 - sh)) {
4189 IWL_DEBUG_TX_REPLY("more frames than bitmap size");
4190 return -1;
4191 }
4192
4193 /* check for success or failure according to the
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004194 * transmitted bitmap and block-ack bitmap */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004195 bitmap &= agg->bitmap;
Zhu Yib481de92007-09-25 17:54:57 -07004196
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004197 /* For each frame attempted in aggregation,
4198 * update driver's record of tx frame's status. */
Zhu Yib481de92007-09-25 17:54:57 -07004199 for (i = 0; i < agg->frame_count ; i++) {
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004200 ack = bitmap & (1 << i);
4201 successes += !!ack;
Zhu Yib481de92007-09-25 17:54:57 -07004202 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004203 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
4204 agg->start_idx + i);
Zhu Yib481de92007-09-25 17:54:57 -07004205 }
4206
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004207 tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status;
4208 tx_status->flags = IEEE80211_TX_STATUS_ACK;
Ron Rindjunsky99556432008-01-28 14:07:25 +02004209 tx_status->flags |= IEEE80211_TX_STATUS_AMPDU;
4210 tx_status->ampdu_ack_map = successes;
4211 tx_status->ampdu_ack_len = agg->frame_count;
Ron Rindjunsky4c424e42008-03-04 18:09:27 -08004212 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags,
4213 &tx_status->control);
Zhu Yib481de92007-09-25 17:54:57 -07004214
John W. Linvillef868f4e2008-03-07 16:38:43 -05004215 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004216
4217 return 0;
4218}
4219
4220/**
4221 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
4222 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004223static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004224 u16 txq_id)
4225{
4226 /* Simply stop the queue, but don't change any configuration;
4227 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004228 iwl_write_prph(priv,
Tomas Winkler12a81f62008-04-03 16:05:20 -07004229 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004230 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4231 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4232}
4233
4234/**
4235 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
Ron Rindjunskyb095d032008-03-06 17:36:56 -08004236 * priv->lock must be held by the caller
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004237 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004238static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004239 u16 ssn_idx, u8 tx_fifo)
4240{
Ron Rindjunskyb095d032008-03-06 17:36:56 -08004241 int ret = 0;
4242
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004243 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
4244 IWL_WARNING("queue number too small: %d, must be > %d\n",
4245 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4246 return -EINVAL;
4247 }
4248
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004249 ret = iwl_grab_nic_access(priv);
Ron Rindjunskyb095d032008-03-06 17:36:56 -08004250 if (ret)
4251 return ret;
4252
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004253 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4254
Tomas Winkler12a81f62008-04-03 16:05:20 -07004255 iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004256
4257 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4258 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4259 /* supposes that ssn_idx is valid (!= 0xFFF) */
4260 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4261
Tomas Winkler12a81f62008-04-03 16:05:20 -07004262 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004263 iwl4965_txq_ctx_deactivate(priv, txq_id);
4264 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4265
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004266 iwl_release_nic_access(priv);
Ron Rindjunskyb095d032008-03-06 17:36:56 -08004267
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004268 return 0;
4269}
4270
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004271int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004272 u8 tid, int txq_id)
4273{
4274 struct iwl4965_queue *q = &priv->txq[txq_id].q;
4275 u8 *addr = priv->stations[sta_id].sta.sta.addr;
4276 struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
4277
4278 switch (priv->stations[sta_id].tid[tid].agg.state) {
4279 case IWL_EMPTYING_HW_QUEUE_DELBA:
4280 /* We are reclaiming the last packet of the */
4281 /* aggregated HW queue */
4282 if (txq_id == tid_data->agg.txq_id &&
4283 q->read_ptr == q->write_ptr) {
4284 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
4285 int tx_fifo = default_tid_to_tx_fifo[tid];
4286 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
4287 iwl4965_tx_queue_agg_disable(priv, txq_id,
4288 ssn, tx_fifo);
4289 tid_data->agg.state = IWL_AGG_OFF;
4290 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4291 }
4292 break;
4293 case IWL_EMPTYING_HW_QUEUE_ADDBA:
4294 /* We are reclaiming the last packet of the queue */
4295 if (tid_data->tfds_in_queue == 0) {
4296 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
4297 tid_data->agg.state = IWL_AGG_ON;
4298 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4299 }
4300 break;
4301 }
Zhu Yib481de92007-09-25 17:54:57 -07004302 return 0;
4303}
4304
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004305/**
4306 * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed
4307 * @index -- current index
4308 * @n_bd -- total number of entries in queue (s/b power of 2)
4309 */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004310static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
Zhu Yib481de92007-09-25 17:54:57 -07004311{
4312 return (index == 0) ? n_bd - 1 : index - 1;
4313}
4314
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004315/**
4316 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
4317 *
4318 * Handles block-acknowledge notification from device, which reports success
4319 * of frames sent via aggregation.
4320 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004321static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004322 struct iwl4965_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07004323{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004324 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
4325 struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
Zhu Yib481de92007-09-25 17:54:57 -07004326 int index;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004327 struct iwl4965_tx_queue *txq = NULL;
4328 struct iwl4965_ht_agg *agg;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004329 DECLARE_MAC_BUF(mac);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004330
4331 /* "flow" corresponds to Tx queue */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004332 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004333
4334 /* "ssn" is start of block-ack Tx window, corresponds to index
4335 * (in Tx queue's circular buffer) of first TFD/frame in window */
Zhu Yib481de92007-09-25 17:54:57 -07004336 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
4337
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004338 if (scd_flow >= ARRAY_SIZE(priv->txq)) {
Zhu Yib481de92007-09-25 17:54:57 -07004339 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
4340 return;
4341 }
4342
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004343 txq = &priv->txq[scd_flow];
Zhu Yib481de92007-09-25 17:54:57 -07004344 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004345
4346 /* Find index just before block-ack window */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004347 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
Zhu Yib481de92007-09-25 17:54:57 -07004348
Ian Schram01ebd062007-10-25 17:15:22 +08004349 /* TODO: Need to get this copy more safely - now good for debug */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004350
Joe Perches0795af52007-10-03 17:59:30 -07004351 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
4352 "sta_id = %d\n",
Zhu Yib481de92007-09-25 17:54:57 -07004353 agg->wait_for_ba,
Joe Perches0795af52007-10-03 17:59:30 -07004354 print_mac(mac, (u8*) &ba_resp->sta_addr_lo32),
Zhu Yib481de92007-09-25 17:54:57 -07004355 ba_resp->sta_id);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004356 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
Zhu Yib481de92007-09-25 17:54:57 -07004357 "%d, scd_ssn = %d\n",
4358 ba_resp->tid,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004359 ba_resp->seq_ctl,
Tomas Winkler0310ae72008-03-11 16:17:19 -07004360 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
Zhu Yib481de92007-09-25 17:54:57 -07004361 ba_resp->scd_flow,
4362 ba_resp->scd_ssn);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004363 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
Zhu Yib481de92007-09-25 17:54:57 -07004364 agg->start_idx,
John W. Linvillef868f4e2008-03-07 16:38:43 -05004365 (unsigned long long)agg->bitmap);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004366
4367 /* Update driver's record of ACK vs. not for each frame in window */
Zhu Yib481de92007-09-25 17:54:57 -07004368 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004369
4370 /* Release all TFDs before the SSN, i.e. all TFDs in front of
4371 * block-ack window (we assume that they've been successfully
4372 * transmitted ... if not, it's too late anyway). */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004373 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
4374 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
4375 priv->stations[ba_resp->sta_id].
4376 tid[ba_resp->tid].tfds_in_queue -= freed;
4377 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
4378 priv->mac80211_registered &&
4379 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
4380 ieee80211_wake_queue(priv->hw, scd_flow);
4381 iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id,
4382 ba_resp->tid, scd_flow);
4383 }
Zhu Yib481de92007-09-25 17:54:57 -07004384}
4385
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004386/**
4387 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
4388 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004389static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
Zhu Yib481de92007-09-25 17:54:57 -07004390 u16 txq_id)
4391{
4392 u32 tbl_dw_addr;
4393 u32 tbl_dw;
4394 u16 scd_q2ratid;
4395
4396 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
4397
4398 tbl_dw_addr = priv->scd_base_addr +
4399 SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
4400
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004401 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
Zhu Yib481de92007-09-25 17:54:57 -07004402
4403 if (txq_id & 0x1)
4404 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
4405 else
4406 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
4407
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004408 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
Zhu Yib481de92007-09-25 17:54:57 -07004409
4410 return 0;
4411}
4412
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004413
Zhu Yib481de92007-09-25 17:54:57 -07004414/**
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004415 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
4416 *
4417 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID,
4418 * i.e. it must be one of the higher queues used for aggregation
Zhu Yib481de92007-09-25 17:54:57 -07004419 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004420static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
Zhu Yib481de92007-09-25 17:54:57 -07004421 int tx_fifo, int sta_id, int tid,
4422 u16 ssn_idx)
4423{
4424 unsigned long flags;
4425 int rc;
4426 u16 ra_tid;
4427
4428 if (IWL_BACK_QUEUE_FIRST_ID > txq_id)
4429 IWL_WARNING("queue number too small: %d, must be > %d\n",
4430 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4431
4432 ra_tid = BUILD_RAxTID(sta_id, tid);
4433
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004434 /* Modify device's station table to Tx this TID */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004435 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid);
Zhu Yib481de92007-09-25 17:54:57 -07004436
4437 spin_lock_irqsave(&priv->lock, flags);
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004438 rc = iwl_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004439 if (rc) {
4440 spin_unlock_irqrestore(&priv->lock, flags);
4441 return rc;
4442 }
4443
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004444 /* Stop this Tx queue before configuring it */
Zhu Yib481de92007-09-25 17:54:57 -07004445 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4446
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004447 /* Map receiver-address / traffic-ID to this queue */
Zhu Yib481de92007-09-25 17:54:57 -07004448 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
4449
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004450 /* Set this queue as a chain-building queue */
Tomas Winkler12a81f62008-04-03 16:05:20 -07004451 iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07004452
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004453 /* Place first TFD at index corresponding to start sequence number.
4454 * Assumes that ssn_idx is valid (!= 0xFFF) */
Tomas Winklerfc4b6852007-10-25 17:15:24 +08004455 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4456 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
Zhu Yib481de92007-09-25 17:54:57 -07004457 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4458
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004459 /* Set up Tx window size and frame limit for this queue */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004460 iwl_write_targ_mem(priv,
Zhu Yib481de92007-09-25 17:54:57 -07004461 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id),
4462 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
4463 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
4464
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004465 iwl_write_targ_mem(priv, priv->scd_base_addr +
Zhu Yib481de92007-09-25 17:54:57 -07004466 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
4467 (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
4468 & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
4469
Tomas Winkler12a81f62008-04-03 16:05:20 -07004470 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Zhu Yib481de92007-09-25 17:54:57 -07004471
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004472 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
Zhu Yib481de92007-09-25 17:54:57 -07004473 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
4474
Tomas Winkler3395f6e2008-03-25 16:33:37 -07004475 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004476 spin_unlock_irqrestore(&priv->lock, flags);
4477
4478 return 0;
4479}
4480
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004481#endif /* CONFIG_IWL4965_HT */
Zhu Yib481de92007-09-25 17:54:57 -07004482
4483/**
4484 * iwl4965_add_station - Initialize a station's hardware rate table
4485 *
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004486 * The uCode's station table contains a table of fallback rates
Zhu Yib481de92007-09-25 17:54:57 -07004487 * for automatic fallback during transmission.
4488 *
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004489 * NOTE: This sets up a default set of values. These will be replaced later
4490 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
4491 * rc80211_simple.
Zhu Yib481de92007-09-25 17:54:57 -07004492 *
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004493 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
4494 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
4495 * which requires station table entry to exist).
Zhu Yib481de92007-09-25 17:54:57 -07004496 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004497void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
Zhu Yib481de92007-09-25 17:54:57 -07004498{
4499 int i, r;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004500 struct iwl4965_link_quality_cmd link_cmd = {
Zhu Yib481de92007-09-25 17:54:57 -07004501 .reserved1 = 0,
4502 };
4503 u16 rate_flags;
4504
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004505 /* Set up the rate scaling to start at selected rate, fall back
4506 * all the way down to 1M in IEEE order, and then spin on 1M */
Zhu Yib481de92007-09-25 17:54:57 -07004507 if (is_ap)
4508 r = IWL_RATE_54M_INDEX;
Johannes Berg8318d782008-01-24 19:38:38 +01004509 else if (priv->band == IEEE80211_BAND_5GHZ)
Zhu Yib481de92007-09-25 17:54:57 -07004510 r = IWL_RATE_6M_INDEX;
4511 else
4512 r = IWL_RATE_1M_INDEX;
4513
4514 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
4515 rate_flags = 0;
4516 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
4517 rate_flags |= RATE_MCS_CCK_MSK;
4518
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004519 /* Use Tx antenna B only */
Zhu Yib481de92007-09-25 17:54:57 -07004520 rate_flags |= RATE_MCS_ANT_B_MSK;
4521 rate_flags &= ~RATE_MCS_ANT_A_MSK;
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004522
Zhu Yib481de92007-09-25 17:54:57 -07004523 link_cmd.rs_table[i].rate_n_flags =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004524 iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags);
4525 r = iwl4965_get_prev_ieee_rate(r);
Zhu Yib481de92007-09-25 17:54:57 -07004526 }
4527
4528 link_cmd.general_params.single_stream_ant_msk = 2;
4529 link_cmd.general_params.dual_stream_ant_msk = 3;
4530 link_cmd.agg_params.agg_dis_start_th = 3;
4531 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
4532
4533 /* Update the rate scaling for control frame Tx to AP */
Tomas Winklera4062b82008-03-11 16:17:16 -07004534 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_setting.bcast_sta_id;
Zhu Yib481de92007-09-25 17:54:57 -07004535
Tomas Winklere5472972008-03-28 16:21:12 -07004536 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
4537 sizeof(link_cmd), &link_cmd, NULL);
Zhu Yib481de92007-09-25 17:54:57 -07004538}
4539
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004540#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07004541
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004542static u8 iwl4965_is_channel_extension(struct iwl_priv *priv,
Johannes Berg8318d782008-01-24 19:38:38 +01004543 enum ieee80211_band band,
Tomas Winkler78330fd2008-02-06 02:37:18 +02004544 u16 channel, u8 extension_chan_offset)
Zhu Yib481de92007-09-25 17:54:57 -07004545{
Assaf Kraussbf85ea42008-03-14 10:38:49 -07004546 const struct iwl_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07004547
Assaf Krauss8622e702008-03-21 13:53:43 -07004548 ch_info = iwl_get_channel_info(priv, band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07004549 if (!is_channel_valid(ch_info))
4550 return 0;
4551
Guy Cohen134eb5d2008-03-04 18:09:25 -08004552 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)
Zhu Yib481de92007-09-25 17:54:57 -07004553 return 0;
4554
4555 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
4556 (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX))
4557 return 1;
4558
4559 return 0;
4560}
4561
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004562static u8 iwl4965_is_fat_tx_allowed(struct iwl_priv *priv,
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004563 struct ieee80211_ht_info *sta_ht_inf)
Zhu Yib481de92007-09-25 17:54:57 -07004564{
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004565 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
Zhu Yib481de92007-09-25 17:54:57 -07004566
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004567 if ((!iwl_ht_conf->is_ht) ||
4568 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
Guy Cohen134eb5d2008-03-04 18:09:25 -08004569 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE))
Zhu Yib481de92007-09-25 17:54:57 -07004570 return 0;
4571
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004572 if (sta_ht_inf) {
4573 if ((!sta_ht_inf->ht_supported) ||
Roel Kluin194c7ca2008-02-02 20:48:48 +01004574 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH)))
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004575 return 0;
4576 }
Zhu Yib481de92007-09-25 17:54:57 -07004577
Tomas Winkler78330fd2008-02-06 02:37:18 +02004578 return (iwl4965_is_channel_extension(priv, priv->band,
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004579 iwl_ht_conf->control_channel,
4580 iwl_ht_conf->extension_chan_offset));
Zhu Yib481de92007-09-25 17:54:57 -07004581}
4582
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004583void iwl4965_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
Zhu Yib481de92007-09-25 17:54:57 -07004584{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004585 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
Zhu Yib481de92007-09-25 17:54:57 -07004586 u32 val;
4587
4588 if (!ht_info->is_ht)
4589 return;
4590
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004591 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004592 if (iwl4965_is_fat_tx_allowed(priv, NULL))
Zhu Yib481de92007-09-25 17:54:57 -07004593 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4594 else
4595 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
4596 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
4597
4598 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
4599 IWL_DEBUG_ASSOC("control diff than current %d %d\n",
4600 le16_to_cpu(rxon->channel),
4601 ht_info->control_channel);
4602 rxon->channel = cpu_to_le16(ht_info->control_channel);
4603 return;
4604 }
4605
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004606 /* Note: control channel is opposite of extension channel */
Zhu Yib481de92007-09-25 17:54:57 -07004607 switch (ht_info->extension_chan_offset) {
4608 case IWL_EXT_CHANNEL_OFFSET_ABOVE:
4609 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
4610 break;
4611 case IWL_EXT_CHANNEL_OFFSET_BELOW:
4612 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
4613 break;
Guy Cohen134eb5d2008-03-04 18:09:25 -08004614 case IWL_EXT_CHANNEL_OFFSET_NONE:
Zhu Yib481de92007-09-25 17:54:57 -07004615 default:
4616 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4617 break;
4618 }
4619
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004620 val = ht_info->ht_protection;
Zhu Yib481de92007-09-25 17:54:57 -07004621
4622 rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS);
4623
Zhu Yib481de92007-09-25 17:54:57 -07004624 iwl4965_set_rxon_chain(priv);
4625
4626 IWL_DEBUG_ASSOC("supported HT rate 0x%X %X "
4627 "rxon flags 0x%X operation mode :0x%X "
4628 "extension channel offset 0x%x "
4629 "control chan %d\n",
Ron Rindjunskyfd105e72007-11-26 16:14:39 +02004630 ht_info->supp_mcs_set[0], ht_info->supp_mcs_set[1],
4631 le32_to_cpu(rxon->flags), ht_info->ht_protection,
Zhu Yib481de92007-09-25 17:54:57 -07004632 ht_info->extension_chan_offset,
4633 ht_info->control_channel);
4634 return;
4635}
4636
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004637void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index,
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004638 struct ieee80211_ht_info *sta_ht_inf)
Zhu Yib481de92007-09-25 17:54:57 -07004639{
4640 __le32 sta_flags;
Tomas Winklere53cfe02008-01-30 22:05:13 -08004641 u8 mimo_ps_mode;
Zhu Yib481de92007-09-25 17:54:57 -07004642
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004643 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
Zhu Yib481de92007-09-25 17:54:57 -07004644 goto done;
4645
Tomas Winklere53cfe02008-01-30 22:05:13 -08004646 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
4647
Zhu Yib481de92007-09-25 17:54:57 -07004648 sta_flags = priv->stations[index].sta.station_flags;
4649
Tomas Winklere53cfe02008-01-30 22:05:13 -08004650 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
4651
4652 switch (mimo_ps_mode) {
4653 case WLAN_HT_CAP_MIMO_PS_STATIC:
4654 sta_flags |= STA_FLG_MIMO_DIS_MSK;
4655 break;
4656 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
Zhu Yib481de92007-09-25 17:54:57 -07004657 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
Tomas Winklere53cfe02008-01-30 22:05:13 -08004658 break;
4659 case WLAN_HT_CAP_MIMO_PS_DISABLED:
4660 break;
4661 default:
4662 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
4663 break;
4664 }
Zhu Yib481de92007-09-25 17:54:57 -07004665
4666 sta_flags |= cpu_to_le32(
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004667 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
Zhu Yib481de92007-09-25 17:54:57 -07004668
4669 sta_flags |= cpu_to_le32(
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004670 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
Zhu Yib481de92007-09-25 17:54:57 -07004671
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004672 if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
Zhu Yib481de92007-09-25 17:54:57 -07004673 sta_flags |= STA_FLG_FAT_EN_MSK;
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004674 else
Tomas Winklere53cfe02008-01-30 22:05:13 -08004675 sta_flags &= ~STA_FLG_FAT_EN_MSK;
Ron Rindjunsky67d62032007-11-26 16:14:40 +02004676
Zhu Yib481de92007-09-25 17:54:57 -07004677 priv->stations[index].sta.station_flags = sta_flags;
4678 done:
4679 return;
4680}
4681
/*
 * Enable an Rx block-ack (ADDBA) session for one station/TID pair.
 *
 * Updates the driver's cached ADD_STA command for the station under
 * sta_lock, then sends it to the uCode asynchronously (no wait).
 *
 * @priv:   driver private data
 * @sta_id: index into priv->stations[]
 * @tid:    traffic identifier (TID) the BA session applies to
 * @ssn:    starting sequence number of the BA window
 */
static void iwl4965_sta_modify_add_ba_tid(struct iwl_priv *priv,
					int sta_id, int tid, u16 ssn)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);
	/* Modify only the ADDBA-related fields of the station entry */
	priv->stations[sta_id].sta.station_flags_msk = 0;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
	priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
	/* device expects little-endian SSN */
	priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	/* Push the modified entry to the device without blocking */
	iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
}
4697
/*
 * Tear down an Rx block-ack (DELBA) session for one station/TID pair.
 *
 * Mirror of iwl4965_sta_modify_add_ba_tid(): updates the cached ADD_STA
 * command under sta_lock, then sends it to the uCode asynchronously.
 *
 * @priv:   driver private data
 * @sta_id: index into priv->stations[]
 * @tid:    traffic identifier (TID) whose BA session is removed
 */
static void iwl4965_sta_modify_del_ba_tid(struct iwl_priv *priv,
					int sta_id, int tid)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);
	/* Modify only the DELBA-related fields of the station entry */
	priv->stations[sta_id].sta.station_flags_msk = 0;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
	priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	/* Push the modified entry to the device without blocking */
	iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
}
4712
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08004713/*
4714 * Find first available (lowest unused) Tx Queue, mark it "active".
4715 * Called only when finding queue for aggregation.
4716 * Should never return anything < 7, because they should already
4717 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
4718 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004719static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07004720{
4721 int txq_id;
4722
4723 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++)
4724 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
4725 return txq_id;
4726 return -1;
4727}
4728
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004729static int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da,
4730 u16 tid, u16 *start_seq_num)
Zhu Yib481de92007-09-25 17:54:57 -07004731{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004732 struct iwl_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07004733 int sta_id;
4734 int tx_fifo;
4735 int txq_id;
4736 int ssn = -1;
Ron Rindjunskyb095d032008-03-06 17:36:56 -08004737 int ret = 0;
Zhu Yib481de92007-09-25 17:54:57 -07004738 unsigned long flags;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004739 struct iwl4965_tid_data *tid_data;
Joe Perches0795af52007-10-03 17:59:30 -07004740 DECLARE_MAC_BUF(mac);
Zhu Yib481de92007-09-25 17:54:57 -07004741
4742 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4743 tx_fifo = default_tid_to_tx_fifo[tid];
4744 else
4745 return -EINVAL;
4746
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004747 IWL_WARNING("%s on da = %s tid = %d\n",
4748 __func__, print_mac(mac, da), tid);
Zhu Yib481de92007-09-25 17:54:57 -07004749
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004750 sta_id = iwl4965_hw_find_station(priv, da);
Zhu Yib481de92007-09-25 17:54:57 -07004751 if (sta_id == IWL_INVALID_STATION)
4752 return -ENXIO;
4753
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004754 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
4755 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
4756 return -ENXIO;
4757 }
4758
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004759 txq_id = iwl4965_txq_ctx_activate_free(priv);
Zhu Yib481de92007-09-25 17:54:57 -07004760 if (txq_id == -1)
4761 return -ENXIO;
4762
4763 spin_lock_irqsave(&priv->sta_lock, flags);
4764 tid_data = &priv->stations[sta_id].tid[tid];
4765 ssn = SEQ_TO_SN(tid_data->seq_number);
4766 tid_data->agg.txq_id = txq_id;
4767 spin_unlock_irqrestore(&priv->sta_lock, flags);
4768
4769 *start_seq_num = ssn;
Ron Rindjunskyb095d032008-03-06 17:36:56 -08004770 ret = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
4771 sta_id, tid, ssn);
4772 if (ret)
4773 return ret;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004774
Ron Rindjunskyb095d032008-03-06 17:36:56 -08004775 ret = 0;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004776 if (tid_data->tfds_in_queue == 0) {
4777 printk(KERN_ERR "HW queue is empty\n");
4778 tid_data->agg.state = IWL_AGG_ON;
4779 ieee80211_start_tx_ba_cb_irqsafe(hw, da, tid);
4780 } else {
4781 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
4782 tid_data->tfds_in_queue);
4783 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
4784 }
Ron Rindjunskyb095d032008-03-06 17:36:56 -08004785 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07004786}
4787
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004788static int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da,
4789 u16 tid)
Zhu Yib481de92007-09-25 17:54:57 -07004790{
4791
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004792 struct iwl_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07004793 int tx_fifo_id, txq_id, sta_id, ssn = -1;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004794 struct iwl4965_tid_data *tid_data;
Ron Rindjunskyb095d032008-03-06 17:36:56 -08004795 int ret, write_ptr, read_ptr;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004796 unsigned long flags;
Joe Perches0795af52007-10-03 17:59:30 -07004797 DECLARE_MAC_BUF(mac);
4798
Zhu Yib481de92007-09-25 17:54:57 -07004799 if (!da) {
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004800 IWL_ERROR("da = NULL\n");
Zhu Yib481de92007-09-25 17:54:57 -07004801 return -EINVAL;
4802 }
4803
4804 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4805 tx_fifo_id = default_tid_to_tx_fifo[tid];
4806 else
4807 return -EINVAL;
4808
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004809 sta_id = iwl4965_hw_find_station(priv, da);
Zhu Yib481de92007-09-25 17:54:57 -07004810
4811 if (sta_id == IWL_INVALID_STATION)
4812 return -ENXIO;
4813
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004814 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
4815 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
4816
Zhu Yib481de92007-09-25 17:54:57 -07004817 tid_data = &priv->stations[sta_id].tid[tid];
4818 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
4819 txq_id = tid_data->agg.txq_id;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004820 write_ptr = priv->txq[txq_id].q.write_ptr;
4821 read_ptr = priv->txq[txq_id].q.read_ptr;
Zhu Yib481de92007-09-25 17:54:57 -07004822
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004823 /* The queue is not empty */
4824 if (write_ptr != read_ptr) {
4825 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
4826 priv->stations[sta_id].tid[tid].agg.state =
4827 IWL_EMPTYING_HW_QUEUE_DELBA;
4828 return 0;
4829 }
4830
4831 IWL_DEBUG_HT("HW queue empty\n");;
4832 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
4833
4834 spin_lock_irqsave(&priv->lock, flags);
Ron Rindjunskyb095d032008-03-06 17:36:56 -08004835 ret = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004836 spin_unlock_irqrestore(&priv->lock, flags);
4837
Ron Rindjunskyb095d032008-03-06 17:36:56 -08004838 if (ret)
4839 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07004840
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004841 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, da, tid);
4842
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08004843 IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n",
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02004844 print_mac(mac, da), tid);
Zhu Yib481de92007-09-25 17:54:57 -07004845
4846 return 0;
4847}
4848
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02004849int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
4850 enum ieee80211_ampdu_mlme_action action,
4851 const u8 *addr, u16 tid, u16 *ssn)
4852{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07004853 struct iwl_priv *priv = hw->priv;
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02004854 int sta_id;
4855 DECLARE_MAC_BUF(mac);
4856
4857 IWL_DEBUG_HT("A-MPDU action on da=%s tid=%d ",
4858 print_mac(mac, addr), tid);
4859 sta_id = iwl4965_hw_find_station(priv, addr);
4860 switch (action) {
4861 case IEEE80211_AMPDU_RX_START:
4862 IWL_DEBUG_HT("start Rx\n");
4863 iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, *ssn);
4864 break;
4865 case IEEE80211_AMPDU_RX_STOP:
4866 IWL_DEBUG_HT("stop Rx\n");
4867 iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid);
4868 break;
4869 case IEEE80211_AMPDU_TX_START:
4870 IWL_DEBUG_HT("start Tx\n");
4871 return iwl4965_mac_ht_tx_agg_start(hw, addr, tid, ssn);
4872 case IEEE80211_AMPDU_TX_STOP:
4873 IWL_DEBUG_HT("stop Tx\n");
4874 return iwl4965_mac_ht_tx_agg_stop(hw, addr, tid);
4875 default:
4876 IWL_DEBUG_HT("unknown\n");
4877 return -EINVAL;
4878 break;
4879 }
4880 return 0;
4881}
4882
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08004883#endif /* CONFIG_IWL4965_HT */
Zhu Yib481de92007-09-25 17:54:57 -07004884
/* Set up 4965-specific Rx frame reply handlers */
void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv)
{
	/* Legacy Rx frames */
	priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;

	/* High-throughput (HT) Rx frames */
	priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
	priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;

	/* uCode notification when the expected beacon was not received */
	priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
	    iwl4965_rx_missed_beacon_notif;

#ifdef CONFIG_IWL4965_HT
	/* Block-ack (aggregation) status from the uCode */
	priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
#endif /* CONFIG_IWL4965_HT */
}
4902
/* Initialize 4965-specific deferred work items and the statistics timer */
void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv)
{
	INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
	INIT_WORK(&priv->statistics_work, iwl4965_bg_statistics_work);
#ifdef CONFIG_IWL4965_SENSITIVITY
	INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
#endif
	/* periodic timer that requests statistics from the uCode */
	init_timer(&priv->statistics_periodic);
	priv->statistics_periodic.data = (unsigned long)priv;
	priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
}
4914
/* Cancel 4965-specific deferred work set up in hw_setup_deferred_work() */
void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv)
{
	/* _sync: wait for a running timer handler to finish */
	del_timer_sync(&priv->statistics_periodic);

	cancel_delayed_work(&priv->init_alive_start);
}
4921
/* 4965-specific host-command helpers used by iwlcore */
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
	.enqueue_hcmd = iwl4965_enqueue_hcmd,
};
4925
/* 4965-specific library callbacks (driver init, NIC/ucode bring-up,
 * EEPROM access) plugged into the shared iwlcore layer */
static struct iwl_lib_ops iwl4965_lib = {
	.init_drv = iwl4965_init_drv,
	.txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
	.hw_nic_init = iwl4965_hw_nic_init,
	.is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
	.alive_notify = iwl4965_alive_notify,
	.load_ucode = iwl4965_load_bsm,
	.eeprom_ops = {
		.verify_signature = iwlcore_eeprom_verify_signature,
		.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
		.release_semaphore = iwlcore_eeprom_release_semaphore,
	},
	.radio_kill_sw = iwl4965_radio_kill_sw,
};
4940
/* Top-level ops bundle handed to iwlcore for this device family */
static struct iwl_ops iwl4965_ops = {
	.lib = &iwl4965_lib,
	.utils = &iwl4965_hcmd_utils,
};
4945
/* Device configuration for the 4965AGN: firmware image name, supported
 * bands (A/G/N SKU), callbacks and module parameters */
static struct iwl_cfg iwl4965_agn_cfg = {
	.name = "4965AGN",
	.fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode",
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.ops = &iwl4965_ops,
	.mod_params = &iwl4965_mod_params,
};
4953
/* PCI device IDs this driver binds to; {0} terminates the table */
struct pci_device_id iwl4965_hw_card_ids[] = {
	{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
	{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
	{0}
};

/* Export the table so hotplug/modprobe can match devices to this module */
MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
Assaf Krauss1ea87392008-03-18 14:57:50 -07004961
/* Module parameters (all read-only after load: perm 0444),
 * backed by fields of iwl4965_mod_params */
module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
module_param_named(hwcrypto, iwl4965_mod_params.hw_crypto, int, 0444);
MODULE_PARM_DESC(hwcrypto,
		 "using hardware crypto engine (default 0 [software])\n");
module_param_named(debug, iwl4965_mod_params.debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");
module_param_named(
	disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");

module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444);
MODULE_PARM_DESC(queues_num, "number of hw queues.");

/* QoS */
module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");