/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>

#include "iwl-eeprom.h"
#include "iwl-4965.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/* module parameters */
static struct iwl_mod_params iwl4965_mod_params = {
	.num_of_queues = IWL4965_MAX_NUM_QUEUES,
	.enable_qos = 1,
	.amsdu_size_8K = 1,
	/* the rest are 0 by default */
};

static void iwl4965_hw_card_show_info(struct iwl_priv *priv);

#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
				    IWL_RATE_SISO_##s##M_PLCP, \
				    IWL_RATE_MIMO_##s##M_PLCP, \
				    IWL_RATE_##r##M_IEEE,      \
				    IWL_RATE_##ip##M_INDEX,    \
				    IWL_RATE_##in##M_INDEX,    \
				    IWL_RATE_##rp##M_INDEX,    \
				    IWL_RATE_##rn##M_INDEX,    \
				    IWL_RATE_##pp##M_INDEX,    \
				    IWL_RATE_##np##M_INDEX }

/*
 * Parameter order:
 *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 */
const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /*  9mbps */
	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
};
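
/*
 * For reference, a sketch of what a single entry above expands to, derived
 * purely from the IWL_DECLARE_RATE_INFO macro definition (the corresponding
 * struct iwl4965_rate_info field names are not spelled out here):
 *
 *   IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11) becomes
 *   [IWL_RATE_6M_INDEX] = { IWL_RATE_6M_PLCP, IWL_RATE_SISO_6M_PLCP,
 *                           IWL_RATE_MIMO_6M_PLCP, IWL_RATE_6M_IEEE,
 *                           IWL_RATE_5M_INDEX, IWL_RATE_9M_INDEX,
 *                           IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX,
 *                           IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX }
 */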

#ifdef CONFIG_IWL4965_HT

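/*
 * Default routing of each traffic identifier (TID) to a Tx FIFO.  The first
 * sixteen entries cover TIDs 0-15, mapping QoS traffic to one of the four
 * access-category FIFOs (IWL_TX_FIFO_AC0..AC3) or to IWL_TX_FIFO_NONE for
 * TIDs that carry no data.  (Descriptive note added for clarity; the purpose
 * of the final, seventeenth entry is not documented in this file.)
 */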
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};

#endif	/*CONFIG_IWL4965_HT */

/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = iwl_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERROR("BSM uCode verification failed at "
				  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				  BSM_SRAM_LOWER_BOUND,
				  reg - BSM_SRAM_LOWER_BOUND, len,
				  val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}

/**
 * iwl4965_load_bsm - Load bootstrap instructions
 *
 * BSM operation:
 *
 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
 * in special SRAM that does not power down during RFKILL.  When powering back
 * up after power-saving sleeps (or during initial uCode load), the BSM loads
 * the bootstrap program into the on-board processor, and starts it.
 *
 * The bootstrap program loads (via DMA) instructions and data for a new
 * program from host DRAM locations indicated by the host driver in the
 * BSM_DRAM_* registers.  Once the new program is loaded, it starts
 * automatically.
 *
 * When initializing the NIC, the host driver points the BSM to the
 * "initialize" uCode image.  This uCode sets up some internal data, then
 * notifies host via "initialize alive" that it is complete.
 *
 * The host then replaces the BSM_DRAM_* pointer values to point to the
 * normal runtime uCode instructions and a backup uCode data cache buffer
 * (filled initially with starting data values for the on-board processor),
 * then triggers the "initialize" uCode to load and launch the runtime uCode,
 * which begins normal operation.
 *
 * When doing a power-save shutdown, runtime uCode saves data SRAM into
 * the backup data cache in DRAM before SRAM is powered down.
 *
 * When powering back up, the BSM loads the bootstrap program.  This reloads
 * the runtime uCode instructions and the backup data cache into SRAM,
 * and re-launches the runtime uCode from where it left off.
 */
static int iwl4965_load_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int i;
	u32 done;
	u32 reg_offset;
	int ret;

	IWL_DEBUG_INFO("Begin load bsm\n");

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IWL_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 *   in host DRAM ... host DRAM physical address bits 35:4 for 4965.
	 * NOTE:  iwl4965_initialize_alive_start() will replace these values,
	 *        after the "initialize" uCode has run, to point to
	 *        runtime/protocol instructions and backup data cache. */
	pinst = priv->ucode_init.p_addr >> 4;
	pdata = priv->ucode_init_data.p_addr >> 4;
	inst_len = priv->ucode_init.len;
	data_len = priv->ucode_init_data.len;

	ret = iwl_grab_nic_access(priv);
	if (ret)
		return ret;

	iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));

	ret = iwl4965_verify_bsm(priv);
	if (ret) {
		iwl_release_nic_access(priv);
		return ret;
	}

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
	iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 *   to prepare to load "initialize" uCode */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish */
	for (i = 0; i < 100; i++) {
		done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
	else {
		IWL_ERROR("BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 *   (e.g. when powering back up after power-save shutdown) */
	iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);

	iwl_release_nic_access(priv);

	return 0;
}

static int iwl4965_init_drv(struct iwl_priv *priv)
{
	int ret;
	int i;

	priv->antenna = (enum iwl4965_antenna)priv->cfg->mod_params->antenna;
	priv->retry_rate = 1;
	priv->ibss_beacon = NULL;

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->power_data.lock);
	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);
	spin_lock_init(&priv->lq_mngr.lock);

	priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
					sizeof(struct iwl4965_shared),
					&priv->shared_phys);

	if (!priv->shared_virt) {
		ret = -ENOMEM;
		goto err;
	}

	memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));


	for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);

	/* Clear the driver's (not device's) station table */
	iwlcore_clear_stations_table(priv);

	priv->data_retry_limit = -1;
	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = IEEE80211_IF_TYPE_STA;

	priv->use_ant_b_for_management_frame = 1; /* start with ant B */
	priv->valid_antenna = 0x7;	/* assume all 3 connected */
	priv->ps_mode = IWL_MIMO_PS_NONE;

	/* Choose which receivers/antennas to use */
	iwl4965_set_rxon_chain(priv);

	iwlcore_reset_qos(priv);

	priv->qos_data.qos_active = 0;
	priv->qos_data.qos_cap.val = 0;

	iwlcore_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);

	priv->rates_mask = IWL_RATES_MASK;
	/* If power management is turned on, default to AC mode */
	priv->power_mode = IWL_POWER_AC;
	priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;

	ret = iwl_init_channel_map(priv);
	if (ret) {
		IWL_ERROR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = iwl4965_init_geos(priv);
	if (ret) {
		IWL_ERROR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERROR("Failed to register network device (error %d)\n",
				ret);
		goto err_free_geos;
	}

	priv->hw->conf.beacon_int = 100;
	priv->mac80211_registered = 1;

	return 0;

err_free_geos:
	iwl4965_free_geos(priv);
err_free_channel_map:
	iwl_free_channel_map(priv);
err:
	return ret;
}

static int is_fat_channel(__le32 rxon_flags)
{
	return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
		(rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
}

static u8 is_single_stream(struct iwl_priv *priv)
{
#ifdef CONFIG_IWL4965_HT
	if (!priv->current_ht_config.is_ht ||
	    (priv->current_ht_config.supp_mcs_set[1] == 0) ||
	    (priv->ps_mode == IWL_MIMO_PS_STATIC))
		return 1;
#else
	return 1;
#endif	/*CONFIG_IWL4965_HT */
	return 0;
}

int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	/* 4965 HT rate format */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);

		if (idx >= IWL_RATE_MIMO_6M_PLCP)
			idx = idx - IWL_RATE_MIMO_6M_PLCP;

		idx += IWL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht*/
		if (idx >= IWL_RATE_9M_INDEX)
			idx += 1;
		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
			return idx;

	/* 4965 legacy rate format, search for match in table */
	} else {
		for (idx = 0; idx < ARRAY_SIZE(iwl4965_rates); idx++)
			if (iwl4965_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx;
	}

	return -1;
}
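
/*
 * Worked example of the HT branch above (a sketch based only on the
 * relationships visible in this file): the low byte of rate_n_flags is
 * "folded" so that MIMO PLCP values (at or above IWL_RATE_MIMO_6M_PLCP)
 * land on the same range as SISO ones, then offset by IWL_FIRST_OFDM_RATE
 * and bumped by one past IWL_RATE_9M_INDEX because 9 Mbps has no HT
 * equivalent.  E.g. a low byte equal to IWL_RATE_MIMO_6M_PLCP becomes 0 and
 * then IWL_FIRST_OFDM_RATE, i.e. the 6 Mbps slot, assuming (as the rate
 * table above suggests) that the OFDM rates start at 6 Mbps.
 */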

/**
 * translate ucode response to mac80211 tx status control values
 */
void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
				  struct ieee80211_tx_control *control)
{
	int rate_index;

	control->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		control->flags |= IEEE80211_TXCTL_OFDM_HT;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		control->flags |= IEEE80211_TXCTL_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_FAT_MSK)
		control->flags |= IEEE80211_TXCTL_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		control->flags |= IEEE80211_TXCTL_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		control->flags |= IEEE80211_TXCTL_SHORT_GI;
	/* since iwl4965_hwrate_to_plcp_idx is band indifferent, we always use
	 * IEEE80211_BAND_2GHZ band as it contains all the rates */
	rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
	if (rate_index == -1)
		control->tx_rate = NULL;
	else
		control->tx_rate =
			&priv->bands[IEEE80211_BAND_2GHZ].bitrates[rate_index];
}

/*
 * Determine how many receiver/antenna chains to use.
 * More provides better reception via diversity.  Fewer saves power.
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl4965_get_rx_chain_counter(struct iwl_priv *priv,
					u8 *idle_state, u8 *rx_state)
{
	u8 is_single = is_single_stream(priv);
	u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1;

	/* # of Rx chains to use when expecting MIMO. */
	if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC)))
		*rx_state = 2;
	else
		*rx_state = 3;

	/* # Rx chains when idling and maybe trying to save power */
	switch (priv->ps_mode) {
	case IWL_MIMO_PS_STATIC:
	case IWL_MIMO_PS_DYNAMIC:
		*idle_state = (is_cam) ? 2 : 1;
		break;
	case IWL_MIMO_PS_NONE:
		*idle_state = (is_cam) ? *rx_state : 1;
		break;
	default:
		*idle_state = 1;
		break;
	}

	return 0;
}
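
/*
 * Summary of the mapping above, derived directly from the logic (CAM =
 * continuously-aware mode, i.e. STATUS_POWER_PMI not set; assumes an
 * HT/MIMO-capable association, so is_single_stream() returns 0 except for
 * static MIMO power save, which it treats as single stream):
 *
 *   ps_mode                CAM: rx/idle     power save (PMI): rx/idle
 *   IWL_MIMO_PS_NONE          3 / 3                3 / 1
 *   IWL_MIMO_PS_DYNAMIC       3 / 2                3 / 1
 *   IWL_MIMO_PS_STATIC        2 / 2                2 / 1
 *
 * With a non-HT or single-stream peer, rx is always 2 and idle follows the
 * same ps_mode/CAM rules.
 */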

int iwl4965_hw_rxq_stop(struct iwl_priv *priv)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	rc = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				 (1 << 24), 1000);
	if (rc < 0)
		IWL_ERROR("Can't stop Rx DMA.\n");

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

u8 iwl4965_hw_find_station(struct iwl_priv *priv, const u8 *addr)
{
	int i;
	int start = 0;
	int ret = IWL_INVALID_STATION;
	unsigned long flags;
	DECLARE_MAC_BUF(mac);

	if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) ||
	    (priv->iw_mode == IEEE80211_IF_TYPE_AP))
		start = IWL_STA_ID;

	if (is_broadcast_ether_addr(addr))
		return priv->hw_params.bcast_sta_id;

	spin_lock_irqsave(&priv->sta_lock, flags);
	for (i = start; i < priv->hw_params.max_stations; i++)
		if ((priv->stations[i].used) &&
		    (!compare_ether_addr
		     (priv->stations[i].sta.sta.addr, addr))) {
			ret = i;
			goto out;
		}

	IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n",
			      print_mac(mac, addr), priv->num_stations);

out:
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return ret;
}

static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	if (!pwr_max) {
		u32 val;

		ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
					    &val);

		if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					~APMG_PS_CTRL_MSK_PWR_SRC);
	} else
		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				~APMG_PS_CTRL_MSK_PWR_SRC);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}

static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
{
	int ret;
	unsigned long flags;
	unsigned int rb_size;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   rxq->dma_addr >> 8);

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   (priv->shared_phys +
			    offsetof(struct iwl4965_shared, rb_closed)) >> 4);

	/* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   /* 0x10 << 4 | */
			   (RX_QUEUE_SIZE_LOG <<
			    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));

	/*
	 * iwl_write32(priv,CSR_INT_COAL_REG,0);
	 */

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

/* Tell 4965 where to find the "keep warm" buffer */
static int iwl4965_kw_init(struct iwl_priv *priv)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc)
		goto out;

	iwl_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
			   priv->kw.dma_addr >> 4);
	iwl_release_nic_access(priv);
out:
	spin_unlock_irqrestore(&priv->lock, flags);
	return rc;
}

static int iwl4965_kw_alloc(struct iwl_priv *priv)
{
	struct pci_dev *dev = priv->pci_dev;
	struct iwl4965_kw *kw = &priv->kw;

	kw->size = IWL4965_KW_SIZE;	/* TBW need set somewhere else */
	kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
	if (!kw->v_addr)
		return -ENOMEM;

	return 0;
}

/**
 * iwl4965_kw_free - Free the "keep warm" buffer
 */
static void iwl4965_kw_free(struct iwl_priv *priv)
{
	struct pci_dev *dev = priv->pci_dev;
	struct iwl4965_kw *kw = &priv->kw;

	if (kw->v_addr) {
		pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
		memset(kw, 0, sizeof(*kw));
	}
}

/**
 * iwl4965_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initialise them again
 *
 * @param priv
 * @return error code
 */
static int iwl4965_txq_ctx_reset(struct iwl_priv *priv)
{
	int rc = 0;
	int txq_id, slots_num;
	unsigned long flags;

	iwl4965_kw_free(priv);

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl4965_hw_txq_ctx_free(priv);

	/* Alloc keep-warm buffer */
	rc = iwl4965_kw_alloc(priv);
	if (rc) {
		IWL_ERROR("Keep Warm allocation failed");
		goto error_kw;
	}

	spin_lock_irqsave(&priv->lock, flags);

	rc = iwl_grab_nic_access(priv);
	if (unlikely(rc)) {
		IWL_ERROR("TX reset failed");
		spin_unlock_irqrestore(&priv->lock, flags);
		goto error_reset;
	}

	/* Turn off all Tx DMA channels */
	iwl_write_prph(priv, IWL49_SCD_TXFACT, 0);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Tell 4965 where to find the keep-warm buffer */
	rc = iwl4965_kw_init(priv);
	if (rc) {
		IWL_ERROR("kw_init failed\n");
		goto error_reset;
	}

	/* Alloc and init all (default 16) Tx queues,
	 * including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					   txq_id);
		if (rc) {
			IWL_ERROR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return rc;

error:
	iwl4965_hw_txq_ctx_free(priv);
error_reset:
	iwl4965_kw_free(priv);
error_kw:
	return rc;
}

int iwl4965_hw_nic_init(struct iwl_priv *priv)
{
	int rc;
	unsigned long flags;
	struct iwl4965_rx_queue *rxq = &priv->rxq;
	u8 rev_id;
	u32 val;
	u8 val_link;

	iwl4965_power_init_handle(priv);

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (rc < 0) {
		spin_unlock_irqrestore(&priv->lock, flags);
		IWL_DEBUG_INFO("Failed to init the card\n");
		return rc;
	}

	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	iwl_read_prph(priv, APMG_CLK_CTRL_REG);

	iwl_write_prph(priv, APMG_CLK_CTRL_REG,
		       APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	iwl_read_prph(priv, APMG_CLK_CTRL_REG);

	udelay(20);

	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);
	iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Determine HW type */
	rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
	if (rc)
		return rc;

	IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id);

	iwl4965_nic_set_pwr_src(priv, 1);
	spin_lock_irqsave(&priv->lock, flags);

	if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) {
		pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
		/* Enable No Snoop field */
		pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
				       val & ~(1 << 11));
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) {
		IWL_ERROR("Older EEPROM detected!  Aborting.\n");
		return -EINVAL;
	}

	pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);

	/* disable L1 entry -- workaround for pre-B1 */
	pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02);

	spin_lock_irqsave(&priv->lock, flags);

	/* set CSR_HW_CONFIG_REG for uCode use */

	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR49_HW_IF_CONFIG_REG_BIT_4965_R |
		    CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI);

	rc = iwl_grab_nic_access(priv);
	if (rc < 0) {
		spin_unlock_irqrestore(&priv->lock, flags);
		IWL_DEBUG_INFO("Failed to init the card\n");
		return rc;
	}

	iwl_read_prph(priv, APMG_PS_CTRL_REG);
	iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
	udelay(5);
	iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl4965_hw_card_show_info(priv);

	/* end nic_init */

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		rc = iwl4965_rx_queue_alloc(priv);
		if (rc) {
			IWL_ERROR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl4965_rx_queue_reset(priv, rxq);

	iwl4965_rx_replenish(priv);

	iwl4965_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl4965_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate and init all Tx and Command queues */
	rc = iwl4965_txq_ctx_reset(priv);
	if (rc)
		return rc;

	if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n");

	if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n");

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}

int iwl4965_hw_nic_stop_master(struct iwl_priv *priv)
{
	int rc = 0;
	u32 reg_val;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* set stop master bit */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	reg_val = iwl_read32(priv, CSR_GP_CNTRL);

	if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
	    (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
		IWL_DEBUG_INFO("Card in power save, master is already "
			       "stopped\n");
	else {
		rc = iwl_poll_bit(priv, CSR_RESET,
				  CSR_RESET_REG_FLAG_MASTER_DISABLED,
				  CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
		if (rc < 0) {
			spin_unlock_irqrestore(&priv->lock, flags);
			return rc;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	IWL_DEBUG_INFO("stop master\n");

	return rc;
}

/**
 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv)
{

	int txq_id;
	unsigned long flags;

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		spin_lock_irqsave(&priv->lock, flags);
		if (iwl_grab_nic_access(priv)) {
			spin_unlock_irqrestore(&priv->lock, flags);
			continue;
		}

		iwl_write_direct32(priv,
				   IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
		iwl_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
				    IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
				    (txq_id), 200);
		iwl_release_nic_access(priv);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	/* Deallocate memory for all Tx queues */
	iwl4965_hw_txq_ctx_free(priv);
}

int iwl4965_hw_nic_reset(struct iwl_priv *priv)
{
	int rc = 0;
	unsigned long flags;

	iwl4965_hw_nic_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	rc = iwl_poll_bit(priv, CSR_RESET,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);

	udelay(10);

	rc = iwl_grab_nic_access(priv);
	if (!rc) {
		iwl_write_prph(priv, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT |
			       APMG_CLK_VAL_BSM_CLK_RQT);

		udelay(10);

		iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		iwl_release_nic_access(priv);
	}

	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
	wake_up_interruptible(&priv->wait_command_queue);

	spin_unlock_irqrestore(&priv->lock, flags);

	return rc;

}

#define REG_RECALIB_PERIOD (60)

/**
 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
 *
 * This callback is provided in order to send a statistics request.
 *
 * This timer function is continually reset to execute within
 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
 * was received.  We need to ensure we receive the statistics in order
 * to update the temperature used for calibrating the TXPOWER.
 */
static void iwl4965_bg_statistics_periodic(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	iwl_send_statistics_request(priv, CMD_ASYNC);
}

#define CT_LIMIT_CONST		259
#define TM_CT_KILL_THRESHOLD	110

void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
{
	struct iwl4965_ct_kill_config cmd;
	u32 R1, R2, R3;
	u32 temp_th;
	u32 crit_temperature;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) {
		R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
		R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
		R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
	} else {
		R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
		R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
		R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
	}

	temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD);

|  | 1025 | crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2; | 
|  | 1026 | cmd.critical_temperature_R = cpu_to_le32(crit_temperature); | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1027 | ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, | 
|  | 1028 | sizeof(cmd), &cmd); | 
|  | 1029 | if (ret) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1030 | IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n"); | 
|  | 1031 | else | 
|  | 1032 | IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n"); | 
|  | 1033 | } | 
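|  |  | /* | 
|  |  |  * Worked example (illustrative values; assumes CELSIUS_TO_KELVIN() | 
|  |  |  * simply adds 273):  TM_CT_KILL_THRESHOLD = 110 C gives temp_th = 383. | 
|  |  |  * With uCode "alive" calibration values R1 = 105, R2 = 105, R3 = 386, | 
|  |  |  * crit_temperature = (383 * (386 - 105)) / 259 + 105 = 520 in the raw | 
|  |  |  * temperature units reported by the uCode; that value is what gets | 
|  |  |  * sent in REPLY_CT_KILL_CONFIG_CMD above. | 
|  |  |  */ | 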
|  | 1034 |  | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 1035 | #ifdef CONFIG_IWL4965_SENSITIVITY | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1036 |  | 
|  | 1037 | /* "false alarms" are signals that our DSP tries to lock onto, | 
|  | 1038 | *   but then determines that they are either noise, or transmissions | 
|  | 1039 | *   from a distant wireless network (also "noise", really) that get | 
|  | 1040 | *   "stepped on" by stronger transmissions within our own network. | 
|  | 1041 | * This algorithm attempts to set a sensitivity level that is high | 
|  | 1042 | *   enough to receive all of our own network traffic, but not so | 
|  | 1043 | *   high that our DSP gets too busy trying to lock onto non-network | 
|  | 1044 | *   activity/noise. */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1045 | static int iwl4965_sens_energy_cck(struct iwl_priv *priv, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1046 | u32 norm_fa, | 
|  | 1047 | u32 rx_enable_time, | 
|  | 1048 | struct statistics_general_data *rx_info) | 
|  | 1049 | { | 
|  | 1050 | u32 max_nrg_cck = 0; | 
|  | 1051 | int i = 0; | 
|  | 1052 | u8 max_silence_rssi = 0; | 
|  | 1053 | u32 silence_ref = 0; | 
|  | 1054 | u8 silence_rssi_a = 0; | 
|  | 1055 | u8 silence_rssi_b = 0; | 
|  | 1056 | u8 silence_rssi_c = 0; | 
|  | 1057 | u32 val; | 
|  | 1058 |  | 
|  | 1059 | /* "false_alarms" values below are cross-multiplications to assess the | 
|  | 1060 | *   numbers of false alarms within the measured period of actual Rx | 
|  | 1061 | *   (Rx is off when we're txing), vs the min/max expected false alarms | 
|  | 1062 | *   (some should be expected if rx is sensitive enough) in a | 
|  | 1063 | *   hypothetical listening period of 200 time units (TU), 204.8 msec: | 
|  | 1064 | * | 
|  | 1065 | * MIN_FA/200-TU-period < false_alarms/actual-rx-time < MAX_FA/200-TU-period | 
|  | 1066 | * | 
|  | 1067 | */ | 
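|  |  | /* Example with illustrative numbers: norm_fa = 10 observed during | 
|  |  |  *   rx_enable_time = 102400 usec (half the 204800-usec reference | 
|  |  |  *   period) gives false_alarms = 10 * 204800 = 2048000, which is | 
|  |  |  *   compared against MAX_FA_CCK * 102400 -- exactly as if 20 false | 
|  |  |  *   alarms per full 200-TU period were compared against MAX_FA_CCK. */ | 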
|  | 1068 | u32 false_alarms = norm_fa * 200 * 1024; | 
|  | 1069 | u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; | 
|  | 1070 | u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1071 | struct iwl4965_sensitivity_data *data = NULL; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1072 |  | 
|  | 1073 | data = &(priv->sensitivity_data); | 
|  | 1074 |  | 
|  | 1075 | data->nrg_auto_corr_silence_diff = 0; | 
|  | 1076 |  | 
|  | 1077 | /* Find max silence rssi among all 3 receivers. | 
|  | 1078 | * This is background noise, which may include transmissions from other | 
|  | 1079 | *    networks, measured during silence before our network's beacon */ | 
|  | 1080 | silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a & | 
| Reinette Chatre | 8a1b024 | 2008-01-14 17:46:25 -0800 | [diff] [blame] | 1081 | ALL_BAND_FILTER) >> 8); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1082 | silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b & | 
| Reinette Chatre | 8a1b024 | 2008-01-14 17:46:25 -0800 | [diff] [blame] | 1083 | ALL_BAND_FILTER) >> 8); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1084 | silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c & | 
| Reinette Chatre | 8a1b024 | 2008-01-14 17:46:25 -0800 | [diff] [blame] | 1085 | ALL_BAND_FILTER) >> 8); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1086 |  | 
|  | 1087 | val = max(silence_rssi_b, silence_rssi_c); | 
|  | 1088 | max_silence_rssi = max(silence_rssi_a, (u8) val); | 
|  | 1089 |  | 
|  | 1090 | /* Store silence rssi in 20-beacon history table */ | 
|  | 1091 | data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi; | 
|  | 1092 | data->nrg_silence_idx++; | 
|  | 1093 | if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L) | 
|  | 1094 | data->nrg_silence_idx = 0; | 
|  | 1095 |  | 
|  | 1096 | /* Find max silence rssi across 20 beacon history */ | 
|  | 1097 | for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) { | 
|  | 1098 | val = data->nrg_silence_rssi[i]; | 
|  | 1099 | silence_ref = max(silence_ref, val); | 
|  | 1100 | } | 
|  | 1101 | IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", | 
|  | 1102 | silence_rssi_a, silence_rssi_b, silence_rssi_c, | 
|  | 1103 | silence_ref); | 
|  | 1104 |  | 
|  | 1105 | /* Find max rx energy (min value!) among all 3 receivers, | 
|  | 1106 | *   measured during beacon frame. | 
|  | 1107 | * Save it in 10-beacon history table. */ | 
|  | 1108 | i = data->nrg_energy_idx; | 
|  | 1109 | val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c); | 
|  | 1110 | data->nrg_value[i] = min(rx_info->beacon_energy_a, val); | 
|  | 1111 |  | 
|  | 1112 | data->nrg_energy_idx++; | 
|  | 1113 | if (data->nrg_energy_idx >= 10) | 
|  | 1114 | data->nrg_energy_idx = 0; | 
|  | 1115 |  | 
|  | 1116 | /* Find min rx energy (max value) across 10 beacon history. | 
|  | 1117 | * This is the minimum signal level that we want to receive well. | 
|  | 1118 | * Add backoff (margin so we don't miss slightly lower energy frames). | 
|  | 1119 | * This establishes an upper bound (min value) for energy threshold. */ | 
|  | 1120 | max_nrg_cck = data->nrg_value[0]; | 
|  | 1121 | for (i = 1; i < 10; i++) | 
|  | 1122 | max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); | 
|  | 1123 | max_nrg_cck += 6; | 
|  | 1124 |  | 
|  | 1125 | IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", | 
|  | 1126 | rx_info->beacon_energy_a, rx_info->beacon_energy_b, | 
|  | 1127 | rx_info->beacon_energy_c, max_nrg_cck - 6); | 
|  | 1128 |  | 
|  | 1129 | /* Count number of consecutive beacons with fewer-than-desired | 
|  | 1130 | *   false alarms. */ | 
|  | 1131 | if (false_alarms < min_false_alarms) | 
|  | 1132 | data->num_in_cck_no_fa++; | 
|  | 1133 | else | 
|  | 1134 | data->num_in_cck_no_fa = 0; | 
|  | 1135 | IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n", | 
|  | 1136 | data->num_in_cck_no_fa); | 
|  | 1137 |  | 
|  | 1138 | /* If we got too many false alarms this time, reduce sensitivity */ | 
|  | 1139 | if (false_alarms > max_false_alarms) { | 
|  | 1140 | IWL_DEBUG_CALIB("norm FA %u > max FA %u\n", | 
|  | 1141 | false_alarms, max_false_alarms); | 
|  | 1142 | IWL_DEBUG_CALIB("... reducing sensitivity\n"); | 
|  | 1143 | data->nrg_curr_state = IWL_FA_TOO_MANY; | 
|  | 1144 |  | 
|  | 1145 | if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) { | 
|  | 1146 | /* Store for "fewer than desired" on later beacon */ | 
|  | 1147 | data->nrg_silence_ref = silence_ref; | 
|  | 1148 |  | 
|  | 1149 | /* increase energy threshold (reduce nrg value) | 
|  | 1150 | *   to decrease sensitivity */ | 
|  | 1151 | if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK)) | 
|  | 1152 | data->nrg_th_cck = data->nrg_th_cck | 
|  | 1153 | - NRG_STEP_CCK; | 
|  | 1154 | } | 
|  | 1155 |  | 
|  | 1156 | /* increase auto_corr values to decrease sensitivity */ | 
|  | 1157 | if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK) | 
|  | 1158 | data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1; | 
|  | 1159 | else { | 
|  | 1160 | val = data->auto_corr_cck + AUTO_CORR_STEP_CCK; | 
|  | 1161 | data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val); | 
|  | 1162 | } | 
|  | 1163 | val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK; | 
|  | 1164 | data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val); | 
|  | 1165 |  | 
|  | 1166 | /* Else if we got fewer than desired, increase sensitivity */ | 
|  | 1167 | } else if (false_alarms < min_false_alarms) { | 
|  | 1168 | data->nrg_curr_state = IWL_FA_TOO_FEW; | 
|  | 1169 |  | 
|  | 1170 | /* Compare silence level with silence level for most recent | 
|  | 1171 | *   healthy number or too many false alarms */ | 
|  | 1172 | data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - | 
|  | 1173 | (s32)silence_ref; | 
|  | 1174 |  | 
|  | 1175 | IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n", | 
|  | 1176 | false_alarms, min_false_alarms, | 
|  | 1177 | data->nrg_auto_corr_silence_diff); | 
|  | 1178 |  | 
|  | 1179 | /* Increase value to increase sensitivity, but only if: | 
|  | 1180 | * 1a) previous beacon did *not* have *too many* false alarms | 
|  | 1181 | * 1b) AND there's a significant difference in Rx levels | 
|  | 1182 | *      from a previous beacon with too many, or healthy # FAs | 
|  | 1183 | * OR 2) We've seen a lot of beacons (100) with too few | 
|  | 1184 | *       false alarms */ | 
|  | 1185 | if ((data->nrg_prev_state != IWL_FA_TOO_MANY) && | 
|  | 1186 | ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || | 
|  | 1187 | (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { | 
|  | 1188 |  | 
|  | 1189 | IWL_DEBUG_CALIB("... increasing sensitivity\n"); | 
|  | 1190 | /* Increase nrg value to increase sensitivity */ | 
|  | 1191 | val = data->nrg_th_cck + NRG_STEP_CCK; | 
|  | 1192 | data->nrg_th_cck = min((u32)NRG_MIN_CCK, val); | 
|  | 1193 |  | 
|  | 1194 | /* Decrease auto_corr values to increase sensitivity */ | 
|  | 1195 | val = data->auto_corr_cck - AUTO_CORR_STEP_CCK; | 
|  | 1196 | data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val); | 
|  | 1197 |  | 
|  | 1198 | val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK; | 
|  | 1199 | data->auto_corr_cck_mrc = | 
|  | 1200 | max((u32)AUTO_CORR_MIN_CCK_MRC, val); | 
|  | 1201 |  | 
|  | 1202 | } else | 
|  | 1203 | IWL_DEBUG_CALIB("... but not changing sensitivity\n"); | 
|  | 1204 |  | 
|  | 1205 | /* Else we got a healthy number of false alarms, keep status quo */ | 
|  | 1206 | } else { | 
|  | 1207 | IWL_DEBUG_CALIB(" FA in safe zone\n"); | 
|  | 1208 | data->nrg_curr_state = IWL_FA_GOOD_RANGE; | 
|  | 1209 |  | 
|  | 1210 | /* Store for use in "fewer than desired" with later beacon */ | 
|  | 1211 | data->nrg_silence_ref = silence_ref; | 
|  | 1212 |  | 
|  | 1213 | /* If previous beacon had too many false alarms, | 
|  | 1214 | *   give it some extra margin by reducing sensitivity again | 
|  | 1215 | *   (but don't go below measured energy of desired Rx) */ | 
|  | 1216 | if (IWL_FA_TOO_MANY == data->nrg_prev_state) { | 
|  | 1217 | IWL_DEBUG_CALIB("... increasing margin\n"); | 
|  | 1218 | data->nrg_th_cck -= NRG_MARGIN; | 
|  | 1219 | } | 
|  | 1220 | } | 
|  | 1221 |  | 
|  | 1222 | /* Make sure the energy threshold does not go above the measured | 
|  | 1223 | * energy of the desired Rx signals (reduced by backoff margin), | 
|  | 1224 | * or else we might start missing Rx frames. | 
|  | 1225 | * Lower value is higher energy, so we use max()! | 
|  | 1226 | */ | 
|  | 1227 | data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); | 
|  | 1228 | IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck); | 
|  | 1229 |  | 
|  | 1230 | data->nrg_prev_state = data->nrg_curr_state; | 
|  | 1231 |  | 
|  | 1232 | return 0; | 
|  | 1233 | } | 
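|  |  | /* | 
|  |  |  * Worked example (illustrative values): suppose the weakest desired | 
|  |  |  * beacon over the last 10 beacons gave nrg_value = 91, so | 
|  |  |  * max_nrg_cck = 91 + 6 = 97.  If the "too many false alarms" branch | 
|  |  |  * had stepped nrg_th_cck down to 94 (a higher energy threshold, i.e. | 
|  |  |  * less sensitive), the final max() pulls it back up to 97 so the | 
|  |  |  * weakest desired beacons are still received. | 
|  |  |  */ | 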
|  | 1234 |  | 
|  | 1235 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1236 | static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1237 | u32 norm_fa, | 
|  | 1238 | u32 rx_enable_time) | 
|  | 1239 | { | 
|  | 1240 | u32 val; | 
|  | 1241 | u32 false_alarms = norm_fa * 200 * 1024; | 
|  | 1242 | u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; | 
|  | 1243 | u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1244 | struct iwl4965_sensitivity_data *data = NULL; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1245 |  | 
|  | 1246 | data = &(priv->sensitivity_data); | 
|  | 1247 |  | 
|  | 1248 | /* If we got too many false alarms this time, reduce sensitivity */ | 
|  | 1249 | if (false_alarms > max_false_alarms) { | 
|  | 1250 |  | 
|  | 1251 | IWL_DEBUG_CALIB("norm FA %u > max FA %u\n", | 
|  | 1252 | false_alarms, max_false_alarms); | 
|  | 1253 |  | 
|  | 1254 | val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; | 
|  | 1255 | data->auto_corr_ofdm = | 
|  | 1256 | min((u32)AUTO_CORR_MAX_OFDM, val); | 
|  | 1257 |  | 
|  | 1258 | val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM; | 
|  | 1259 | data->auto_corr_ofdm_mrc = | 
|  | 1260 | min((u32)AUTO_CORR_MAX_OFDM_MRC, val); | 
|  | 1261 |  | 
|  | 1262 | val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM; | 
|  | 1263 | data->auto_corr_ofdm_x1 = | 
|  | 1264 | min((u32)AUTO_CORR_MAX_OFDM_X1, val); | 
|  | 1265 |  | 
|  | 1266 | val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM; | 
|  | 1267 | data->auto_corr_ofdm_mrc_x1 = | 
|  | 1268 | min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val); | 
|  | 1269 | } | 
|  | 1270 |  | 
|  | 1271 | /* Else if we got fewer than desired, increase sensitivity */ | 
|  | 1272 | else if (false_alarms < min_false_alarms) { | 
|  | 1273 |  | 
|  | 1274 | IWL_DEBUG_CALIB("norm FA %u < min FA %u\n", | 
|  | 1275 | false_alarms, min_false_alarms); | 
|  | 1276 |  | 
|  | 1277 | val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; | 
|  | 1278 | data->auto_corr_ofdm = | 
|  | 1279 | max((u32)AUTO_CORR_MIN_OFDM, val); | 
|  | 1280 |  | 
|  | 1281 | val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM; | 
|  | 1282 | data->auto_corr_ofdm_mrc = | 
|  | 1283 | max((u32)AUTO_CORR_MIN_OFDM_MRC, val); | 
|  | 1284 |  | 
|  | 1285 | val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM; | 
|  | 1286 | data->auto_corr_ofdm_x1 = | 
|  | 1287 | max((u32)AUTO_CORR_MIN_OFDM_X1, val); | 
|  | 1288 |  | 
|  | 1289 | val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM; | 
|  | 1290 | data->auto_corr_ofdm_mrc_x1 = | 
|  | 1291 | max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val); | 
|  | 1292 | } | 
|  | 1293 |  | 
|  | 1294 | else | 
|  | 1295 | IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n", | 
|  | 1296 | min_false_alarms, false_alarms, max_false_alarms); | 
|  | 1297 |  | 
|  | 1298 | return 0; | 
|  | 1299 | } | 
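|  |  | /* | 
|  |  |  * In effect, each statistics period nudges the OFDM correlator | 
|  |  |  * thresholds one AUTO_CORR_STEP_OFDM toward less sensitivity when | 
|  |  |  * false alarms are excessive, or toward more sensitivity when they | 
|  |  |  * are scarce, saturating at the AUTO_CORR_MIN/MAX bounds; the | 
|  |  |  * starting points are the defaults set in iwl4965_init_sensitivity() | 
|  |  |  * below. | 
|  |  |  */ | 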
|  | 1300 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1301 | static int iwl4965_sensitivity_callback(struct iwl_priv *priv, | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1302 | struct iwl_cmd *cmd, struct sk_buff *skb) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1303 | { | 
|  | 1304 | /* We didn't cache the SKB; let the caller free it */ | 
|  | 1305 | return 1; | 
|  | 1306 | } | 
|  | 1307 |  | 
|  | 1308 | /* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1309 | static int iwl4965_sensitivity_write(struct iwl_priv *priv, u8 flags) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1310 | { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1311 | struct iwl4965_sensitivity_cmd cmd ; | 
|  | 1312 | struct iwl4965_sensitivity_data *data = NULL; | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1313 | struct iwl_host_cmd cmd_out = { | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1314 | .id = SENSITIVITY_CMD, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1315 | .len = sizeof(struct iwl4965_sensitivity_cmd), | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1316 | .meta.flags = flags, | 
|  | 1317 | .data = &cmd, | 
|  | 1318 | }; | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1319 | int ret; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1320 |  | 
|  | 1321 | data = &(priv->sensitivity_data); | 
|  | 1322 |  | 
|  | 1323 | memset(&cmd, 0, sizeof(cmd)); | 
|  | 1324 |  | 
|  | 1325 | cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = | 
|  | 1326 | cpu_to_le16((u16)data->auto_corr_ofdm); | 
|  | 1327 | cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = | 
|  | 1328 | cpu_to_le16((u16)data->auto_corr_ofdm_mrc); | 
|  | 1329 | cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = | 
|  | 1330 | cpu_to_le16((u16)data->auto_corr_ofdm_x1); | 
|  | 1331 | cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = | 
|  | 1332 | cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); | 
|  | 1333 |  | 
|  | 1334 | cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = | 
|  | 1335 | cpu_to_le16((u16)data->auto_corr_cck); | 
|  | 1336 | cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = | 
|  | 1337 | cpu_to_le16((u16)data->auto_corr_cck_mrc); | 
|  | 1338 |  | 
|  | 1339 | cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] = | 
|  | 1340 | cpu_to_le16((u16)data->nrg_th_cck); | 
|  | 1341 | cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] = | 
|  | 1342 | cpu_to_le16((u16)data->nrg_th_ofdm); | 
|  | 1343 |  | 
|  | 1344 | cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = | 
|  | 1345 | __constant_cpu_to_le16(190); | 
|  | 1346 | cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = | 
|  | 1347 | __constant_cpu_to_le16(390); | 
|  | 1348 | cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] = | 
|  | 1349 | __constant_cpu_to_le16(62); | 
|  | 1350 |  | 
|  | 1351 | IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", | 
|  | 1352 | data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, | 
|  | 1353 | data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, | 
|  | 1354 | data->nrg_th_ofdm); | 
|  | 1355 |  | 
|  | 1356 | IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n", | 
|  | 1357 | data->auto_corr_cck, data->auto_corr_cck_mrc, | 
|  | 1358 | data->nrg_th_cck); | 
|  | 1359 |  | 
| Ben Cahill | f7d09d7 | 2007-11-29 11:09:51 +0800 | [diff] [blame] | 1360 | /* Update uCode's "work" table, and copy it to DSP */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1361 | cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; | 
|  | 1362 |  | 
|  | 1363 | if (flags & CMD_ASYNC) | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1364 | cmd_out.meta.u.callback = iwl4965_sensitivity_callback; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1365 |  | 
|  | 1366 | /* Don't send command to uCode if nothing has changed */ | 
|  | 1367 | if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), | 
|  | 1368 | sizeof(u16)*HD_TABLE_SIZE)) { | 
|  | 1369 | IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n"); | 
|  | 1370 | return 0; | 
|  | 1371 | } | 
|  | 1372 |  | 
|  | 1373 | /* Copy table for comparison next time */ | 
|  | 1374 | memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), | 
|  | 1375 | sizeof(u16)*HD_TABLE_SIZE); | 
|  | 1376 |  | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1377 | ret = iwl_send_cmd(priv, &cmd_out); | 
|  | 1378 | if (ret) | 
|  | 1379 | IWL_ERROR("SENSITIVITY_CMD failed\n"); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1380 |  | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1381 | return ret; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1382 | } | 
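|  |  | /* | 
|  |  |  * Usage note (illustrative): callers pass 0 as the flags argument for | 
|  |  |  * a blocking command, or CMD_ASYNC for fire-and-forget; in the async | 
|  |  |  * case the callback above is attached, and its return value of 1 | 
|  |  |  * tells the command code that the response skb was not cached and | 
|  |  |  * may be freed by the caller. | 
|  |  |  */ | 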
|  | 1383 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1384 | void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags, u8 force) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1385 | { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1386 | struct iwl4965_sensitivity_data *data = NULL; | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1387 | int i; | 
|  | 1388 | int ret  = 0; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1389 |  | 
|  | 1390 | IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n"); | 
|  | 1391 |  | 
|  | 1392 | if (force) | 
|  | 1393 | memset(&(priv->sensitivity_tbl[0]), 0, | 
|  | 1394 | sizeof(u16)*HD_TABLE_SIZE); | 
|  | 1395 |  | 
|  | 1396 | /* Clear driver's sensitivity algo data */ | 
|  | 1397 | data = &(priv->sensitivity_data); | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1398 | memset(data, 0, sizeof(struct iwl4965_sensitivity_data)); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1399 |  | 
|  | 1400 | data->num_in_cck_no_fa = 0; | 
|  | 1401 | data->nrg_curr_state = IWL_FA_TOO_MANY; | 
|  | 1402 | data->nrg_prev_state = IWL_FA_TOO_MANY; | 
|  | 1403 | data->nrg_silence_ref = 0; | 
|  | 1404 | data->nrg_silence_idx = 0; | 
|  | 1405 | data->nrg_energy_idx = 0; | 
|  | 1406 |  | 
|  | 1407 | for (i = 0; i < 10; i++) | 
|  | 1408 | data->nrg_value[i] = 0; | 
|  | 1409 |  | 
|  | 1410 | for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) | 
|  | 1411 | data->nrg_silence_rssi[i] = 0; | 
|  | 1412 |  | 
|  | 1413 | data->auto_corr_ofdm = 90; | 
|  | 1414 | data->auto_corr_ofdm_mrc = 170; | 
|  | 1415 | data->auto_corr_ofdm_x1  = 105; | 
|  | 1416 | data->auto_corr_ofdm_mrc_x1 = 220; | 
|  | 1417 | data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF; | 
|  | 1418 | data->auto_corr_cck_mrc = 200; | 
|  | 1419 | data->nrg_th_cck = 100; | 
|  | 1420 | data->nrg_th_ofdm = 100; | 
|  | 1421 |  | 
|  | 1422 | data->last_bad_plcp_cnt_ofdm = 0; | 
|  | 1423 | data->last_fa_cnt_ofdm = 0; | 
|  | 1424 | data->last_bad_plcp_cnt_cck = 0; | 
|  | 1425 | data->last_fa_cnt_cck = 0; | 
|  | 1426 |  | 
|  | 1427 | /* Clear prior Sensitivity command data to force send to uCode */ | 
|  | 1428 | if (force) | 
|  | 1429 | memset(&(priv->sensitivity_tbl[0]), 0, | 
|  | 1430 | sizeof(u16)*HD_TABLE_SIZE); | 
|  | 1431 |  | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1432 | ret |= iwl4965_sensitivity_write(priv, flags); | 
|  | 1433 | IWL_DEBUG_CALIB("<<return 0x%X\n", ret); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1434 |  | 
|  | 1435 | return; | 
|  | 1436 | } | 
|  | 1437 |  | 
|  | 1438 |  | 
|  | 1439 | /* Reset differential Rx gains in NIC to prepare for chain noise calibration. | 
|  | 1440 | * Called after every association, but this runs only once! | 
|  | 1441 | *  ... once chain noise is calibrated the first time, it's good forever.  */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1442 | void iwl4965_chain_noise_reset(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1443 | { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1444 | struct iwl4965_chain_noise_data *data = NULL; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1445 |  | 
|  | 1446 | data = &(priv->chain_noise_data); | 
| Tomas Winkler | 3109ece | 2008-03-28 16:33:35 -0700 | [diff] [blame] | 1447 | if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1448 | struct iwl4965_calibration_cmd cmd; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1449 |  | 
|  | 1450 | memset(&cmd, 0, sizeof(cmd)); | 
|  | 1451 | cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; | 
|  | 1452 | cmd.diff_gain_a = 0; | 
|  | 1453 | cmd.diff_gain_b = 0; | 
|  | 1454 | cmd.diff_gain_c = 0; | 
| Tomas Winkler | e547297 | 2008-03-28 16:21:12 -0700 | [diff] [blame] | 1455 | iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD, | 
|  | 1456 | sizeof(cmd), &cmd, NULL); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1457 | msleep(4); | 
|  | 1458 | data->state = IWL_CHAIN_NOISE_ACCUMULATE; | 
|  | 1459 | IWL_DEBUG_CALIB("Run chain_noise_calibrate\n"); | 
|  | 1460 | } | 
|  | 1461 | return; | 
|  | 1462 | } | 
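|  |  | /* | 
|  |  |  * Note: sending diff_gain 0 for all three chains puts the receivers | 
|  |  |  * back on a common gain baseline, so the 20-beacon accumulation that | 
|  |  |  * follows in iwl4965_noise_calibration() compares the chains' noise | 
|  |  |  * floors on an equal footing. | 
|  |  |  */ | 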
|  | 1463 |  | 
|  | 1464 | /* | 
|  | 1465 | * Accumulate 20 beacons of signal and noise statistics for each of | 
|  | 1466 | *   3 receivers/antennas/rx-chains, then figure out: | 
|  | 1467 | * 1)  Which antennas are connected. | 
|  | 1468 | * 2)  Differential rx gain settings to balance the 3 receivers. | 
|  | 1469 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1470 | static void iwl4965_noise_calibration(struct iwl_priv *priv, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1471 | struct iwl4965_notif_statistics *stat_resp) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1472 | { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1473 | struct iwl4965_chain_noise_data *data = NULL; | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1474 | int ret = 0; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1475 |  | 
|  | 1476 | u32 chain_noise_a; | 
|  | 1477 | u32 chain_noise_b; | 
|  | 1478 | u32 chain_noise_c; | 
|  | 1479 | u32 chain_sig_a; | 
|  | 1480 | u32 chain_sig_b; | 
|  | 1481 | u32 chain_sig_c; | 
|  | 1482 | u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; | 
|  | 1483 | u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; | 
|  | 1484 | u32 max_average_sig; | 
|  | 1485 | u16 max_average_sig_antenna_i; | 
|  | 1486 | u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; | 
|  | 1487 | u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; | 
|  | 1488 | u16 i = 0; | 
|  | 1489 | u16 chan_num = INITIALIZATION_VALUE; | 
|  | 1490 | u32 band = INITIALIZATION_VALUE; | 
|  | 1491 | u32 active_chains = 0; | 
|  | 1492 | unsigned long flags; | 
|  | 1493 | struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general); | 
|  | 1494 |  | 
|  | 1495 | data = &(priv->chain_noise_data); | 
|  | 1496 |  | 
|  | 1497 | /* Accumulate just the first 20 beacons after the first association, | 
|  | 1498 | *   then we're done forever. */ | 
|  | 1499 | if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { | 
|  | 1500 | if (data->state == IWL_CHAIN_NOISE_ALIVE) | 
|  | 1501 | IWL_DEBUG_CALIB("Wait for noise calib reset\n"); | 
|  | 1502 | return; | 
|  | 1503 | } | 
|  | 1504 |  | 
|  | 1505 | spin_lock_irqsave(&priv->lock, flags); | 
|  | 1506 | if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { | 
|  | 1507 | IWL_DEBUG_CALIB(" << Interference data unavailable\n"); | 
|  | 1508 | spin_unlock_irqrestore(&priv->lock, flags); | 
|  | 1509 | return; | 
|  | 1510 | } | 
|  | 1511 |  | 
|  | 1512 | band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1; | 
|  | 1513 | chan_num = le16_to_cpu(priv->staging_rxon.channel); | 
|  | 1514 |  | 
|  | 1515 | /* Make sure we accumulate data for just the associated channel | 
|  | 1516 | *   (even if scanning). */ | 
|  | 1517 | if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) || | 
|  | 1518 | ((STATISTICS_REPLY_FLG_BAND_24G_MSK == | 
|  | 1519 | (stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) { | 
|  | 1520 | IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n", | 
|  | 1521 | chan_num, band); | 
|  | 1522 | spin_unlock_irqrestore(&priv->lock, flags); | 
|  | 1523 | return; | 
|  | 1524 | } | 
|  | 1525 |  | 
|  | 1526 | /* Accumulate beacon statistics values across 20 beacons */ | 
|  | 1527 | chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & | 
|  | 1528 | IN_BAND_FILTER; | 
|  | 1529 | chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & | 
|  | 1530 | IN_BAND_FILTER; | 
|  | 1531 | chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) & | 
|  | 1532 | IN_BAND_FILTER; | 
|  | 1533 |  | 
|  | 1534 | chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER; | 
|  | 1535 | chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER; | 
|  | 1536 | chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER; | 
|  | 1537 |  | 
|  | 1538 | spin_unlock_irqrestore(&priv->lock, flags); | 
|  | 1539 |  | 
|  | 1540 | data->beacon_count++; | 
|  | 1541 |  | 
|  | 1542 | data->chain_noise_a = (chain_noise_a + data->chain_noise_a); | 
|  | 1543 | data->chain_noise_b = (chain_noise_b + data->chain_noise_b); | 
|  | 1544 | data->chain_noise_c = (chain_noise_c + data->chain_noise_c); | 
|  | 1545 |  | 
|  | 1546 | data->chain_signal_a = (chain_sig_a + data->chain_signal_a); | 
|  | 1547 | data->chain_signal_b = (chain_sig_b + data->chain_signal_b); | 
|  | 1548 | data->chain_signal_c = (chain_sig_c + data->chain_signal_c); | 
|  | 1549 |  | 
|  | 1550 | IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band, | 
|  | 1551 | data->beacon_count); | 
|  | 1552 | IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n", | 
|  | 1553 | chain_sig_a, chain_sig_b, chain_sig_c); | 
|  | 1554 | IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n", | 
|  | 1555 | chain_noise_a, chain_noise_b, chain_noise_c); | 
|  | 1556 |  | 
|  | 1557 | /* If this is the 20th beacon, determine: | 
|  | 1558 | * 1)  Disconnected antennas (using signal strengths) | 
|  | 1559 | * 2)  Differential gain (using silence noise) to balance receivers */ | 
|  | 1560 | if (data->beacon_count == CAL_NUM_OF_BEACONS) { | 
|  | 1561 |  | 
|  | 1562 | /* Analyze signal for disconnected antenna */ | 
|  | 1563 | average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS; | 
|  | 1564 | average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS; | 
|  | 1565 | average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS; | 
|  | 1566 |  | 
|  | 1567 | if (average_sig[0] >= average_sig[1]) { | 
|  | 1568 | max_average_sig = average_sig[0]; | 
|  | 1569 | max_average_sig_antenna_i = 0; | 
|  | 1570 | active_chains = (1 << max_average_sig_antenna_i); | 
|  | 1571 | } else { | 
|  | 1572 | max_average_sig = average_sig[1]; | 
|  | 1573 | max_average_sig_antenna_i = 1; | 
|  | 1574 | active_chains = (1 << max_average_sig_antenna_i); | 
|  | 1575 | } | 
|  | 1576 |  | 
|  | 1577 | if (average_sig[2] >= max_average_sig) { | 
|  | 1578 | max_average_sig = average_sig[2]; | 
|  | 1579 | max_average_sig_antenna_i = 2; | 
|  | 1580 | active_chains = (1 << max_average_sig_antenna_i); | 
|  | 1581 | } | 
|  | 1582 |  | 
|  | 1583 | IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n", | 
|  | 1584 | average_sig[0], average_sig[1], average_sig[2]); | 
|  | 1585 | IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n", | 
|  | 1586 | max_average_sig, max_average_sig_antenna_i); | 
|  | 1587 |  | 
|  | 1588 | /* Compare signal strengths for all 3 receivers. */ | 
|  | 1589 | for (i = 0; i < NUM_RX_CHAINS; i++) { | 
|  | 1590 | if (i != max_average_sig_antenna_i) { | 
|  | 1591 | s32 rssi_delta = (max_average_sig - | 
|  | 1592 | average_sig[i]); | 
|  | 1593 |  | 
|  | 1594 | /* If signal is very weak, compared with | 
|  | 1595 | * strongest, mark it as disconnected. */ | 
|  | 1596 | if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS) | 
|  | 1597 | data->disconn_array[i] = 1; | 
|  | 1598 | else | 
|  | 1599 | active_chains |= (1 << i); | 
|  | 1600 | IWL_DEBUG_CALIB("i = %d  rssiDelta = %d  " | 
|  | 1601 | "disconn_array[i] = %d\n", | 
|  | 1602 | i, rssi_delta, data->disconn_array[i]); | 
|  | 1603 | } | 
|  | 1604 | } | 
|  | 1605 |  | 
|  | 1606 | /* If both chains A & B are disconnected - | 
|  | 1607 | * connect B and leave A as is */ | 
|  | 1608 | if (data->disconn_array[CHAIN_A] && | 
|  | 1609 | data->disconn_array[CHAIN_B]) { | 
|  | 1610 | data->disconn_array[CHAIN_B] = 0; | 
|  | 1611 | active_chains |= (1 << CHAIN_B); | 
|  | 1612 | IWL_DEBUG_CALIB("both A & B chains are disconnected! " | 
|  | 1613 | "W/A - declare B as connected\n"); | 
|  | 1614 | } | 
|  | 1615 |  | 
|  | 1616 | IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n", | 
|  | 1617 | active_chains); | 
|  | 1618 |  | 
|  | 1619 | /* Save for use within RXON, TX, SCAN commands, etc. */ | 
|  | 1620 | priv->valid_antenna = active_chains; | 
|  | 1621 |  | 
|  | 1622 | /* Analyze noise for rx balance */ | 
|  | 1623 | average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS); | 
|  | 1624 | average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS); | 
|  | 1625 | average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS); | 
|  | 1626 |  | 
|  | 1627 | for (i = 0; i < NUM_RX_CHAINS; i++) { | 
|  | 1628 | if (!(data->disconn_array[i]) && | 
|  | 1629 | (average_noise[i] <= min_average_noise)) { | 
|  | 1630 | /* This means that chain i is active and has | 
|  | 1631 | * lower noise values so far: */ | 
|  | 1632 | min_average_noise = average_noise[i]; | 
|  | 1633 | min_average_noise_antenna_i = i; | 
|  | 1634 | } | 
|  | 1635 | } | 
|  | 1636 |  | 
|  | 1637 | data->delta_gain_code[min_average_noise_antenna_i] = 0; | 
|  | 1638 |  | 
|  | 1639 | IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n", | 
|  | 1640 | average_noise[0], average_noise[1], | 
|  | 1641 | average_noise[2]); | 
|  | 1642 |  | 
|  | 1643 | IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n", | 
|  | 1644 | min_average_noise, min_average_noise_antenna_i); | 
|  | 1645 |  | 
|  | 1646 | for (i = 0; i < NUM_RX_CHAINS; i++) { | 
|  | 1647 | s32 delta_g = 0; | 
|  | 1648 |  | 
|  | 1649 | if (!(data->disconn_array[i]) && | 
|  | 1650 | (data->delta_gain_code[i] == | 
|  | 1651 | CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { | 
|  | 1652 | delta_g = average_noise[i] - min_average_noise; | 
|  | 1653 | data->delta_gain_code[i] = (u8)((delta_g * | 
|  | 1654 | 10) / 15); | 
|  | 1655 | if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE < | 
|  | 1656 | data->delta_gain_code[i]) | 
|  | 1657 | data->delta_gain_code[i] = | 
|  | 1658 | CHAIN_NOISE_MAX_DELTA_GAIN_CODE; | 
|  | 1659 |  | 
|  | 1660 | data->delta_gain_code[i] = | 
|  | 1661 | (data->delta_gain_code[i] | (1 << 2)); | 
|  | 1662 | } else | 
|  | 1663 | data->delta_gain_code[i] = 0; | 
|  | 1664 | } | 
|  | 1665 | IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n", | 
|  | 1666 | data->delta_gain_code[0], | 
|  | 1667 | data->delta_gain_code[1], | 
|  | 1668 | data->delta_gain_code[2]); | 
|  | 1669 |  | 
|  | 1670 | /* Differential gain gets sent to uCode only once */ | 
|  | 1671 | if (!data->radio_write) { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1672 | struct iwl4965_calibration_cmd cmd; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1673 | data->radio_write = 1; | 
|  | 1674 |  | 
|  | 1675 | memset(&cmd, 0, sizeof(cmd)); | 
|  | 1676 | cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; | 
|  | 1677 | cmd.diff_gain_a = data->delta_gain_code[0]; | 
|  | 1678 | cmd.diff_gain_b = data->delta_gain_code[1]; | 
|  | 1679 | cmd.diff_gain_c = data->delta_gain_code[2]; | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1680 | ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1681 | sizeof(cmd), &cmd); | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1682 | if (ret) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1683 | IWL_DEBUG_CALIB("failed to send cmd " | 
|  | 1684 | "REPLY_PHY_CALIBRATION_CMD\n"); | 
|  | 1685 |  | 
|  | 1686 | /* TODO we might want recalculate | 
|  | 1687 | * rx_chain in rxon cmd */ | 
|  | 1688 |  | 
|  | 1689 | /* Mark so we run this algo only once! */ | 
|  | 1690 | data->state = IWL_CHAIN_NOISE_CALIBRATED; | 
|  | 1691 | } | 
|  | 1692 | data->chain_noise_a = 0; | 
|  | 1693 | data->chain_noise_b = 0; | 
|  | 1694 | data->chain_noise_c = 0; | 
|  | 1695 | data->chain_signal_a = 0; | 
|  | 1696 | data->chain_signal_b = 0; | 
|  | 1697 | data->chain_signal_c = 0; | 
|  | 1698 | data->beacon_count = 0; | 
|  | 1699 | } | 
|  | 1700 | return; | 
|  | 1701 | } | 
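|  |  | /* | 
|  |  |  * Worked example (illustrative values): average_noise = {35, 30, 33} | 
|  |  |  * gives min_average_noise = 30 on chain B.  Assuming no chain was | 
|  |  |  * flagged disconnected and the cap is not hit: | 
|  |  |  *   chain A: delta_g = 5  ->  (5 * 10) / 15 = 3  ->  3 | (1 << 2) = 7 | 
|  |  |  *   chain C: delta_g = 3  ->  (3 * 10) / 15 = 2  ->  2 | (1 << 2) = 6 | 
|  |  |  *   chain B: 0 (the quietest chain keeps its gain unchanged) | 
|  |  |  * so delta_gain_code = {7, 0, 6} is what gets sent in | 
|  |  |  * REPLY_PHY_CALIBRATION_CMD. | 
|  |  |  */ | 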
|  | 1702 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1703 | static void iwl4965_sensitivity_calibration(struct iwl_priv *priv, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1704 | struct iwl4965_notif_statistics *resp) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1705 | { | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1706 | u32 rx_enable_time; | 
|  | 1707 | u32 fa_cck; | 
|  | 1708 | u32 fa_ofdm; | 
|  | 1709 | u32 bad_plcp_cck; | 
|  | 1710 | u32 bad_plcp_ofdm; | 
|  | 1711 | u32 norm_fa_ofdm; | 
|  | 1712 | u32 norm_fa_cck; | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1713 | struct iwl4965_sensitivity_data *data = NULL; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1714 | struct statistics_rx_non_phy *rx_info = &(resp->rx.general); | 
|  | 1715 | struct statistics_rx *statistics = &(resp->rx); | 
|  | 1716 | unsigned long flags; | 
|  | 1717 | struct statistics_general_data statis; | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1718 | int ret; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1719 |  | 
|  | 1720 | data = &(priv->sensitivity_data); | 
|  | 1721 |  | 
| Tomas Winkler | 3109ece | 2008-03-28 16:33:35 -0700 | [diff] [blame] | 1722 | if (!iwl_is_associated(priv)) { | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1723 | IWL_DEBUG_CALIB("<< - not associated\n"); | 
|  | 1724 | return; | 
|  | 1725 | } | 
|  | 1726 |  | 
|  | 1727 | spin_lock_irqsave(&priv->lock, flags); | 
|  | 1728 | if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { | 
|  | 1729 | IWL_DEBUG_CALIB("<< invalid data.\n"); | 
|  | 1730 | spin_unlock_irqrestore(&priv->lock, flags); | 
|  | 1731 | return; | 
|  | 1732 | } | 
|  | 1733 |  | 
|  | 1734 | /* Extract Statistics: */ | 
|  | 1735 | rx_enable_time = le32_to_cpu(rx_info->channel_load); | 
|  | 1736 | fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt); | 
|  | 1737 | fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt); | 
|  | 1738 | bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err); | 
|  | 1739 | bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err); | 
|  | 1740 |  | 
|  | 1741 | statis.beacon_silence_rssi_a = | 
|  | 1742 | le32_to_cpu(statistics->general.beacon_silence_rssi_a); | 
|  | 1743 | statis.beacon_silence_rssi_b = | 
|  | 1744 | le32_to_cpu(statistics->general.beacon_silence_rssi_b); | 
|  | 1745 | statis.beacon_silence_rssi_c = | 
|  | 1746 | le32_to_cpu(statistics->general.beacon_silence_rssi_c); | 
|  | 1747 | statis.beacon_energy_a = | 
|  | 1748 | le32_to_cpu(statistics->general.beacon_energy_a); | 
|  | 1749 | statis.beacon_energy_b = | 
|  | 1750 | le32_to_cpu(statistics->general.beacon_energy_b); | 
|  | 1751 | statis.beacon_energy_c = | 
|  | 1752 | le32_to_cpu(statistics->general.beacon_energy_c); | 
|  | 1753 |  | 
|  | 1754 | spin_unlock_irqrestore(&priv->lock, flags); | 
|  | 1755 |  | 
|  | 1756 | IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time); | 
|  | 1757 |  | 
|  | 1758 | if (!rx_enable_time) { | 
|  | 1759 | IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n"); | 
|  | 1760 | return; | 
|  | 1761 | } | 
|  | 1762 |  | 
|  | 1763 | /* These statistics increase monotonically, and do not reset | 
|  | 1764 | *   at each beacon.  Calculate difference from last value, or just | 
|  | 1765 | *   use the new statistics value if it has reset or wrapped around. */ | 
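|  |  | /* Example: if last_fa_cnt_cck was 100 and this notification reports | 
|  |  |  *   fa_cck = 130, then 30 new false alarms are counted this round and | 
|  |  |  *   the stored baseline becomes 130.  If the counter has wrapped or | 
|  |  |  *   the uCode restarted and fa_cck reads 7 (< 100), the 7 is used | 
|  |  |  *   as-is and becomes the new baseline. */ | 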
|  | 1766 | if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) | 
|  | 1767 | data->last_bad_plcp_cnt_cck = bad_plcp_cck; | 
|  | 1768 | else { | 
|  | 1769 | bad_plcp_cck -= data->last_bad_plcp_cnt_cck; | 
|  | 1770 | data->last_bad_plcp_cnt_cck += bad_plcp_cck; | 
|  | 1771 | } | 
|  | 1772 |  | 
|  | 1773 | if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm) | 
|  | 1774 | data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm; | 
|  | 1775 | else { | 
|  | 1776 | bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm; | 
|  | 1777 | data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm; | 
|  | 1778 | } | 
|  | 1779 |  | 
|  | 1780 | if (data->last_fa_cnt_ofdm > fa_ofdm) | 
|  | 1781 | data->last_fa_cnt_ofdm = fa_ofdm; | 
|  | 1782 | else { | 
|  | 1783 | fa_ofdm -= data->last_fa_cnt_ofdm; | 
|  | 1784 | data->last_fa_cnt_ofdm += fa_ofdm; | 
|  | 1785 | } | 
|  | 1786 |  | 
|  | 1787 | if (data->last_fa_cnt_cck > fa_cck) | 
|  | 1788 | data->last_fa_cnt_cck = fa_cck; | 
|  | 1789 | else { | 
|  | 1790 | fa_cck -= data->last_fa_cnt_cck; | 
|  | 1791 | data->last_fa_cnt_cck += fa_cck; | 
|  | 1792 | } | 
|  | 1793 |  | 
|  | 1794 | /* Total aborted signal locks */ | 
|  | 1795 | norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; | 
|  | 1796 | norm_fa_cck = fa_cck + bad_plcp_cck; | 
|  | 1797 |  | 
|  | 1798 | IWL_DEBUG_CALIB("cck: fa %u badp %u  ofdm: fa %u badp %u\n", fa_cck, | 
|  | 1799 | bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); | 
|  | 1800 |  | 
|  | 1801 | iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); | 
|  | 1802 | iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1803 | ret = iwl4965_sensitivity_write(priv, CMD_ASYNC); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1804 |  | 
|  | 1805 | return; | 
|  | 1806 | } | 
|  | 1807 |  | 
|  | 1808 | static void iwl4965_bg_sensitivity_work(struct work_struct *work) | 
|  | 1809 | { | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1810 | struct iwl_priv *priv = container_of(work, struct iwl_priv, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1811 | sensitivity_work); | 
|  | 1812 |  | 
|  | 1813 | mutex_lock(&priv->mutex); | 
|  | 1814 |  | 
|  | 1815 | if (test_bit(STATUS_EXIT_PENDING, &priv->status) || | 
|  | 1816 | test_bit(STATUS_SCANNING, &priv->status)) { | 
|  | 1817 | mutex_unlock(&priv->mutex); | 
|  | 1818 | return; | 
|  | 1819 | } | 
|  | 1820 |  | 
|  | 1821 | if (priv->start_calib) { | 
|  | 1822 | iwl4965_noise_calibration(priv, &priv->statistics); | 
|  | 1823 |  | 
|  | 1824 | if (priv->sensitivity_data.state == | 
|  | 1825 | IWL_SENS_CALIB_NEED_REINIT) { | 
|  | 1826 | iwl4965_init_sensitivity(priv, CMD_ASYNC, 0); | 
|  | 1827 | priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED; | 
|  | 1828 | } else | 
|  | 1829 | iwl4965_sensitivity_calibration(priv, | 
|  | 1830 | &priv->statistics); | 
|  | 1831 | } | 
|  | 1832 |  | 
|  | 1833 | mutex_unlock(&priv->mutex); | 
|  | 1834 | return; | 
|  | 1835 | } | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 1836 | #endif /*CONFIG_IWL4965_SENSITIVITY*/ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1837 |  | 
|  | 1838 | static void iwl4965_bg_txpower_work(struct work_struct *work) | 
|  | 1839 | { | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1840 | struct iwl_priv *priv = container_of(work, struct iwl_priv, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1841 | txpower_work); | 
|  | 1842 |  | 
|  | 1843 | /* If a scan happened to start before we got here | 
|  | 1844 | * then just return; the statistics notification will | 
|  | 1845 | * kick off another scheduled work to compensate for | 
|  | 1846 | * any temperature delta we missed here. */ | 
|  | 1847 | if (test_bit(STATUS_EXIT_PENDING, &priv->status) || | 
|  | 1848 | test_bit(STATUS_SCANNING, &priv->status)) | 
|  | 1849 | return; | 
|  | 1850 |  | 
|  | 1851 | mutex_lock(&priv->mutex); | 
|  | 1852 |  | 
|  | 1853 | /* Regardless of whether we are associated, we must reconfigure the | 
|  | 1854 | * TX power since frames can be sent on non-radar channels while | 
|  | 1855 | * not associated */ | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1856 | iwl4965_hw_reg_send_txpower(priv); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1857 |  | 
|  | 1858 | /* Update last_temperature to keep is_calib_needed from running | 
|  | 1859 | * when it isn't needed... */ | 
|  | 1860 | priv->last_temperature = priv->temperature; | 
|  | 1861 |  | 
|  | 1862 | mutex_unlock(&priv->mutex); | 
|  | 1863 | } | 
|  | 1864 |  | 
|  | 1865 | /* | 
|  | 1866 | * Acquire priv->lock before calling this function ! | 
|  | 1867 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1868 | static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1869 | { | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 1870 | iwl_write_direct32(priv, HBUS_TARG_WRPTR, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1871 | (index & 0xff) | (txq_id << 8)); | 
| Tomas Winkler | 12a81f6 | 2008-04-03 16:05:20 -0700 | [diff] [blame] | 1872 | iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1873 | } | 
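|  |  | /* Note: HBUS_TARG_WRPTR packs the TFD index into bits [7:0] and the | 
|  |  |  * Tx queue number starting at bit 8; keeping the scheduler's read | 
|  |  |  * pointer (IWL49_SCD_QUEUE_RDPTR) at the same index effectively marks | 
|  |  |  * the queue as empty. */ | 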
|  | 1874 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 1875 | /** | 
|  | 1876 | * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue | 
|  | 1877 | * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed | 
|  | 1878 | * @scd_retry: (1) Indicates queue will be used in aggregation mode | 
|  | 1879 | * | 
|  | 1880 | * NOTE:  Acquire priv->lock before calling this function ! | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1881 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1882 | static void iwl4965_tx_queue_set_status(struct iwl_priv *priv, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1883 | struct iwl4965_tx_queue *txq, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1884 | int tx_fifo_id, int scd_retry) | 
|  | 1885 | { | 
|  | 1886 | int txq_id = txq->q.id; | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 1887 |  | 
|  | 1888 | /* Find out whether to activate Tx queue */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1889 | int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0; | 
|  | 1890 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 1891 | /* Set up and activate */ | 
| Tomas Winkler | 12a81f6 | 2008-04-03 16:05:20 -0700 | [diff] [blame] | 1892 | iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1893 | (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | | 
|  | 1894 | (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) | | 
|  | 1895 | (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) | | 
|  | 1896 | (scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) | | 
|  | 1897 | SCD_QUEUE_STTS_REG_MSK); | 
|  | 1898 |  | 
|  | 1899 | txq->sched_retry = scd_retry; | 
|  | 1900 |  | 
|  | 1901 | IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n", | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 1902 | active ? "Activate" : "Deactivate", | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1903 | scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); | 
|  | 1904 | } | 
|  | 1905 |  | 
|  | 1906 | static const u16 default_queue_to_tx_fifo[] = { | 
|  | 1907 | IWL_TX_FIFO_AC3, | 
|  | 1908 | IWL_TX_FIFO_AC2, | 
|  | 1909 | IWL_TX_FIFO_AC1, | 
|  | 1910 | IWL_TX_FIFO_AC0, | 
|  | 1911 | IWL_CMD_FIFO_NUM, | 
|  | 1912 | IWL_TX_FIFO_HCCA_1, | 
|  | 1913 | IWL_TX_FIFO_HCCA_2 | 
|  | 1914 | }; | 
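|  |  | /* Index into this array is the Tx queue number: queues 0-3 feed the | 
|  |  |  * four EDCA data FIFOs (queue 0 getting IWL_TX_FIFO_AC3, nominally the | 
|  |  |  * highest-priority access category), queue 4 (IWL_CMD_QUEUE_NUM) feeds | 
|  |  |  * the command FIFO, and queues 5-6 map to the HCCA FIFOs. */ | 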
|  | 1915 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1916 | static inline void iwl4965_txq_ctx_activate(struct iwl_priv *priv, int txq_id) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1917 | { | 
|  | 1918 | set_bit(txq_id, &priv->txq_ctx_active_msk); | 
|  | 1919 | } | 
|  | 1920 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1921 | static inline void iwl4965_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1922 | { | 
|  | 1923 | clear_bit(txq_id, &priv->txq_ctx_active_msk); | 
|  | 1924 | } | 
|  | 1925 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 1926 | int iwl4965_alive_notify(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1927 | { | 
|  | 1928 | u32 a; | 
|  | 1929 | int i = 0; | 
|  | 1930 | unsigned long flags; | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1931 | int ret; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1932 |  | 
|  | 1933 | spin_lock_irqsave(&priv->lock, flags); | 
|  | 1934 |  | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 1935 | #ifdef CONFIG_IWL4965_SENSITIVITY | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1936 | memset(&(priv->sensitivity_data), 0, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1937 | sizeof(struct iwl4965_sensitivity_data)); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1938 | memset(&(priv->chain_noise_data), 0, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1939 | sizeof(struct iwl4965_chain_noise_data)); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1940 | for (i = 0; i < NUM_RX_CHAINS; i++) | 
|  | 1941 | priv->chain_noise_data.delta_gain_code[i] = | 
|  | 1942 | CHAIN_NOISE_DELTA_GAIN_INIT_VAL; | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 1943 | #endif /* CONFIG_IWL4965_SENSITIVITY*/ | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 1944 | ret = iwl_grab_nic_access(priv); | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1945 | if (ret) { | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1946 | spin_unlock_irqrestore(&priv->lock, flags); | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 1947 | return ret; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1948 | } | 
|  | 1949 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 1950 | /* Clear 4965's internal Tx Scheduler data base */ | 
| Tomas Winkler | 12a81f6 | 2008-04-03 16:05:20 -0700 | [diff] [blame] | 1951 | priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1952 | a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET; | 
|  | 1953 | for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4) | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 1954 | iwl_write_targ_mem(priv, a, 0); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1955 | for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4) | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 1956 | iwl_write_targ_mem(priv, a, 0); | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 1957 | /* ... including the queue translate table (one u16 per Tx queue) */ | 
|  |  | for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET + | 
|  |  | sizeof(u16) * priv->hw_params.max_txq_num; a += 4) | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 1958 | iwl_write_targ_mem(priv, a, 0); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1959 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 1960 | /* Tell 4965 where to find Tx byte count tables */ | 
| Tomas Winkler | 12a81f6 | 2008-04-03 16:05:20 -0700 | [diff] [blame] | 1961 | iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR, | 
| Tomas Winkler | 059ff82 | 2008-04-14 21:16:14 -0700 | [diff] [blame] | 1962 | (priv->shared_phys + | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 1963 | offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10); | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 1964 |  | 
|  | 1965 | /* Disable chain mode for all queues */ | 
| Tomas Winkler | 12a81f6 | 2008-04-03 16:05:20 -0700 | [diff] [blame] | 1966 | iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1967 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 1968 | /* Initialize each Tx queue (including the command queue) */ | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 1969 | for (i = 0; i < priv->hw_params.max_txq_num; i++) { | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 1970 |  | 
|  | 1971 | /* TFD circular buffer read/write indexes */ | 
| Tomas Winkler | 12a81f6 | 2008-04-03 16:05:20 -0700 | [diff] [blame] | 1972 | iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0); | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 1973 | iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 1974 |  | 
|  | 1975 | /* Max Tx Window size for Scheduler-ACK mode */ | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 1976 | iwl_write_targ_mem(priv, priv->scd_base_addr + | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1977 | SCD_CONTEXT_QUEUE_OFFSET(i), | 
|  | 1978 | (SCD_WIN_SIZE << | 
|  | 1979 | SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & | 
|  | 1980 | SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 1981 |  | 
|  | 1982 | /* Frame limit */ | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 1983 | iwl_write_targ_mem(priv, priv->scd_base_addr + | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1984 | SCD_CONTEXT_QUEUE_OFFSET(i) + | 
|  | 1985 | sizeof(u32), | 
|  | 1986 | (SCD_FRAME_LIMIT << | 
|  | 1987 | SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & | 
|  | 1988 | SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); | 
|  | 1989 |  | 
|  | 1990 | } | 
| Tomas Winkler | 12a81f6 | 2008-04-03 16:05:20 -0700 | [diff] [blame] | 1991 | iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK, | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 1992 | (1 << priv->hw_params.max_txq_num) - 1); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1993 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 1994 | /* Activate all Tx DMA/FIFO channels */ | 
| Tomas Winkler | 12a81f6 | 2008-04-03 16:05:20 -0700 | [diff] [blame] | 1995 | iwl_write_prph(priv, IWL49_SCD_TXFACT, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 1996 | SCD_TXFACT_REG_TXFIFO_MASK(0, 7)); | 
|  | 1997 |  | 
|  | 1998 | iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 1999 |  | 
|  | 2000 | /* Map each Tx/cmd queue to its corresponding fifo */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2001 | for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { | 
|  | 2002 | int ac = default_queue_to_tx_fifo[i]; | 
|  | 2003 | iwl4965_txq_ctx_activate(priv, i); | 
|  | 2004 | iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); | 
|  | 2005 | } | 
|  | 2006 |  | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 2007 | iwl_release_nic_access(priv); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2008 | spin_unlock_irqrestore(&priv->lock, flags); | 
|  | 2009 |  | 
| Emmanuel Grumbach | 49ea859 | 2008-04-15 16:01:37 -0700 | [diff] [blame] | 2010 | /* Ask for statistics now, the uCode will send statistics notification | 
|  | 2011 | * periodically after association */ | 
|  | 2012 | iwl_send_statistics_request(priv, CMD_ASYNC); | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 2013 | return ret; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2014 | } | 
|  | 2015 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 2016 | /** | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 2017 | * iwl4965_hw_set_hw_params | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 2018 | * | 
|  | 2019 | * Called when initializing the driver | 
|  | 2020 | */ | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 2021 | int iwl4965_hw_set_hw_params(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2022 | { | 
| Assaf Krauss | 316c30d | 2008-03-14 10:38:46 -0700 | [diff] [blame] | 2023 |  | 
| Ron Rindjunsky | dfe7d45 | 2008-04-15 16:01:45 -0700 | [diff] [blame] | 2024 | if ((priv->cfg->mod_params->num_of_queues > IWL4965_MAX_NUM_QUEUES) || | 
| Assaf Krauss | 1ea8739 | 2008-03-18 14:57:50 -0700 | [diff] [blame] | 2025 | (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) { | 
| Assaf Krauss | 316c30d | 2008-03-14 10:38:46 -0700 | [diff] [blame] | 2026 | IWL_ERROR("invalid queues_num, should be between %d and %d\n", | 
| Ron Rindjunsky | dfe7d45 | 2008-04-15 16:01:45 -0700 | [diff] [blame] | 2027 | IWL_MIN_NUM_QUEUES, IWL4965_MAX_NUM_QUEUES); | 
| Tomas Winkler | 059ff82 | 2008-04-14 21:16:14 -0700 | [diff] [blame] | 2028 | return -EINVAL; | 
| Assaf Krauss | 316c30d | 2008-03-14 10:38:46 -0700 | [diff] [blame] | 2029 | } | 
|  | 2030 |  | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 2031 | priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues; | 
|  | 2032 | priv->hw_params.tx_cmd_len = sizeof(struct iwl4965_tx_cmd); | 
|  | 2033 | priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; | 
|  | 2034 | priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; | 
| Assaf Krauss | 1ea8739 | 2008-03-18 14:57:50 -0700 | [diff] [blame] | 2035 | if (priv->cfg->mod_params->amsdu_size_8K) | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 2036 | priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K; | 
| Ron Rindjunsky | 9ee1ba4 | 2007-11-26 16:14:42 +0200 | [diff] [blame] | 2037 | else | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 2038 | priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K; | 
|  | 2039 | priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256; | 
|  | 2040 | priv->hw_params.max_stations = IWL4965_STATION_COUNT; | 
|  | 2041 | priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID; | 
| Tomas Winkler | 3e82a82 | 2008-02-13 11:32:31 -0800 | [diff] [blame] | 2042 |  | 
| Tomas Winkler | ec35cf2 | 2008-04-15 16:01:39 -0700 | [diff] [blame] | 2043 | priv->hw_params.tx_chains_num = 2; | 
|  | 2044 | priv->hw_params.rx_chains_num = 2; | 
|  | 2045 | priv->hw_params.valid_tx_ant = (IWL_ANTENNA_MAIN | IWL_ANTENNA_AUX); | 
|  | 2046 | priv->hw_params.valid_rx_ant = (IWL_ANTENNA_MAIN | IWL_ANTENNA_AUX); | 
| Tomas Winkler | 3e82a82 | 2008-02-13 11:32:31 -0800 | [diff] [blame] | 2047 |  | 
| Tomas Winkler | 059ff82 | 2008-04-14 21:16:14 -0700 | [diff] [blame] | 2048 | return 0; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2049 | } | 
|  | 2050 |  | 
|  | 2051 | /** | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 2052 | * iwl4965_hw_txq_ctx_free - Free TXQ Context | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2053 | * | 
|  | 2054 | * Destroy all TX DMA queues and structures | 
|  | 2055 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 2056 | void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2057 | { | 
|  | 2058 | int txq_id; | 
|  | 2059 |  | 
|  | 2060 | /* Tx queues */ | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 2061 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 2062 | iwl4965_tx_queue_free(priv, &priv->txq[txq_id]); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2063 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 2064 | /* Keep-warm buffer */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2065 | iwl4965_kw_free(priv); | 
|  | 2066 | } | 
|  | 2067 |  | 
|  | 2068 | /** | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 2069 | * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2070 | * | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 2071 | * Does NOT advance any TFD circular buffer read/write indexes | 
|  | 2072 | * Does NOT free the TFD itself (which is within circular buffer) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2073 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 2074 | int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2075 | { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 2076 | struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0]; | 
|  | 2077 | struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr]; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2078 | struct pci_dev *dev = priv->pci_dev; | 
|  | 2079 | int i; | 
|  | 2080 | int counter = 0; | 
|  | 2081 | int index, is_odd; | 
|  | 2082 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 2083 | /* Host command buffers stay mapped in memory, nothing to clean */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2084 | if (txq->q.id == IWL_CMD_QUEUE_NUM) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2085 | return 0; | 
|  | 2086 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 2087 | /* Sanity check on number of chunks */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2088 | counter = IWL_GET_BITS(*bd, num_tbs); | 
|  | 2089 | if (counter > MAX_NUM_OF_TBS) { | 
|  | 2090 | IWL_ERROR("Too many chunks: %i\n", counter); | 
|  | 2091 | /* @todo issue fatal error, this is quite a serious situation */ | 
|  | 2092 | return 0; | 
|  | 2093 | } | 
|  | 2094 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 2095 | /* Unmap chunks, if any. | 
|  | 2096 | * TFD info for odd chunks is in a different format than for even chunks. */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2097 | for (i = 0; i < counter; i++) { | 
|  | 2098 | index = i / 2; | 
|  | 2099 | is_odd = i & 0x1; | 
|  | 2100 |  | 
|  | 2101 | if (is_odd) | 
|  | 2102 | pci_unmap_single( | 
|  | 2103 | dev, | 
|  | 2104 | IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) | | 
|  | 2105 | (IWL_GET_BITS(bd->pa[index], | 
|  | 2106 | tb2_addr_hi20) << 16), | 
|  | 2107 | IWL_GET_BITS(bd->pa[index], tb2_len), | 
|  | 2108 | PCI_DMA_TODEVICE); | 
|  | 2109 |  | 
|  | 2110 | else if (i > 0) | 
|  | 2111 | pci_unmap_single(dev, | 
|  | 2112 | le32_to_cpu(bd->pa[index].tb1_addr), | 
|  | 2113 | IWL_GET_BITS(bd->pa[index], tb1_len), | 
|  | 2114 | PCI_DMA_TODEVICE); | 
|  | 2115 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 2116 | /* Free SKB, if any, for this chunk */ | 
| Tomas Winkler | fc4b685 | 2007-10-25 17:15:24 +0800 | [diff] [blame] | 2117 | if (txq->txb[txq->q.read_ptr].skb[i]) { | 
|  | 2118 | struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i]; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2119 |  | 
|  | 2120 | dev_kfree_skb(skb); | 
| Tomas Winkler | fc4b685 | 2007-10-25 17:15:24 +0800 | [diff] [blame] | 2121 | txq->txb[txq->q.read_ptr].skb[i] = NULL; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2122 | } | 
|  | 2123 | } | 
|  | 2124 | return 0; | 
|  | 2125 | } | 
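/*
 * Illustrative note (field values invented): for odd-numbered chunks the
 * DMA address is stored split across two bit-fields, and the unmap above
 * reassembles it as lo16 | (hi20 << 16).  With tb2_addr_lo16 = 0x5678 and
 * tb2_addr_hi20 = 0x1234, the reconstructed bus address is
 * 0x5678 | (0x1234 << 16) = 0x12345678.
 */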
|  | 2126 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 2127 | int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2128 | { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 2129 | IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n"); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2130 | return -EINVAL; | 
|  | 2131 | } | 
|  | 2132 |  | 
|  | 2133 | static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res) | 
|  | 2134 | { | 
|  | 2135 | s32 sign = 1; | 
|  | 2136 |  | 
|  | 2137 | if (num < 0) { | 
|  | 2138 | sign = -sign; | 
|  | 2139 | num = -num; | 
|  | 2140 | } | 
|  | 2141 | if (denom < 0) { | 
|  | 2142 | sign = -sign; | 
|  | 2143 | denom = -denom; | 
|  | 2144 | } | 
|  | 2146 | *res = ((num * 2 + denom) / (denom * 2)) * sign; | 
|  | 2147 |  | 
|  | 2148 | return 1; | 
|  | 2149 | } | 
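/*
 * Illustrative sketch (not part of the driver): the helper above rounds
 * num/denom to the nearest integer, with halves rounding away from zero.
 * A standalone user-space equivalent, assuming only standard C, behaves
 * like this:
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

static int div_round(int num, int denom)
{
	int sign = ((num < 0) != (denom < 0)) ? -1 : 1;

	if (num < 0)
		num = -num;
	if (denom < 0)
		denom = -denom;

	/* same rounding trick as above: (2 * num + denom) / (2 * denom) */
	return sign * ((num * 2 + denom) / (denom * 2));
}

int main(void)
{
	printf("%d\n", div_round(7, 2));	/*  4: 3.5 rounds away from zero */
	printf("%d\n", div_round(-7, 2));	/* -4 */
	printf("%d\n", div_round(5, 3));	/*  2: 5/3 = 1.67 rounds to 2 */
	return 0;
}
#endif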
|  | 2150 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 2151 | /** | 
|  | 2152 | * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower | 
|  | 2153 | * | 
|  | 2154 | * Determines power supply voltage compensation for txpower calculations. | 
|  | 2155 | * Returns number of 1/2-dB steps to subtract from gain table index, | 
|  | 2156 | * to compensate for difference between power supply voltage during | 
|  | 2157 | * factory measurements, vs. current power supply voltage. | 
|  | 2158 | * | 
|  | 2159 | * Voltage indication is higher for lower voltage. | 
|  | 2160 | * Lower voltage requires more gain (lower gain table index). | 
|  | 2161 | */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2162 | static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage, | 
|  | 2163 | s32 current_voltage) | 
|  | 2164 | { | 
|  | 2165 | s32 comp = 0; | 
|  | 2166 |  | 
|  | 2167 | if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) || | 
|  | 2168 | (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage)) | 
|  | 2169 | return 0; | 
|  | 2170 |  | 
|  | 2171 | iwl4965_math_div_round(current_voltage - eeprom_voltage, | 
|  | 2172 | TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp); | 
|  | 2173 |  | 
|  | 2174 | if (current_voltage > eeprom_voltage) | 
|  | 2175 | comp *= 2; | 
|  | 2176 | if ((comp < -2) || (comp > 2)) | 
|  | 2177 | comp = 0; | 
|  | 2178 |  | 
|  | 2179 | return comp; | 
|  | 2180 | } | 
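/*
 * Worked example (illustrative only; the divisor constant is defined in the
 * 4965 header and is assumed here to be 7 indication codes per 0.3 V): with
 * a factory (EEPROM) indication of 340 and a current indication of 326,
 * (326 - 340) / 7 rounds to -2.  The current indication is not greater than
 * the factory one, so the value is not doubled, and -2 lies inside the
 * accepted [-2, 2] window.  Subtracting -2 in the power-index formula later
 * raises the gain-table index by two half-dB steps, i.e. slightly less gain,
 * because the supply voltage is now higher than at factory calibration.
 */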
|  | 2181 |  | 
| Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 2182 | static const struct iwl_channel_info * | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 2183 | iwl4965_get_channel_txpower_info(struct iwl_priv *priv, | 
| Johannes Berg | 8318d78 | 2008-01-24 19:38:38 +0100 | [diff] [blame] | 2184 | enum ieee80211_band band, u16 channel) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2185 | { | 
| Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 2186 | const struct iwl_channel_info *ch_info; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2187 |  | 
| Assaf Krauss | 8622e70 | 2008-03-21 13:53:43 -0700 | [diff] [blame] | 2188 | ch_info = iwl_get_channel_info(priv, band, channel); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2189 |  | 
|  | 2190 | if (!is_channel_valid(ch_info)) | 
|  | 2191 | return NULL; | 
|  | 2192 |  | 
|  | 2193 | return ch_info; | 
|  | 2194 | } | 
|  | 2195 |  | 
|  | 2196 | static s32 iwl4965_get_tx_atten_grp(u16 channel) | 
|  | 2197 | { | 
|  | 2198 | if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH && | 
|  | 2199 | channel <= CALIB_IWL_TX_ATTEN_GR5_LCH) | 
|  | 2200 | return CALIB_CH_GROUP_5; | 
|  | 2201 |  | 
|  | 2202 | if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH && | 
|  | 2203 | channel <= CALIB_IWL_TX_ATTEN_GR1_LCH) | 
|  | 2204 | return CALIB_CH_GROUP_1; | 
|  | 2205 |  | 
|  | 2206 | if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH && | 
|  | 2207 | channel <= CALIB_IWL_TX_ATTEN_GR2_LCH) | 
|  | 2208 | return CALIB_CH_GROUP_2; | 
|  | 2209 |  | 
|  | 2210 | if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH && | 
|  | 2211 | channel <= CALIB_IWL_TX_ATTEN_GR3_LCH) | 
|  | 2212 | return CALIB_CH_GROUP_3; | 
|  | 2213 |  | 
|  | 2214 | if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH && | 
|  | 2215 | channel <= CALIB_IWL_TX_ATTEN_GR4_LCH) | 
|  | 2216 | return CALIB_CH_GROUP_4; | 
|  | 2217 |  | 
|  | 2218 | IWL_ERROR("Can't find txatten group for channel %d.\n", channel); | 
|  | 2219 | return -1; | 
|  | 2220 | } | 
|  | 2221 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 2222 | static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2223 | { | 
|  | 2224 | s32 b = -1; | 
|  | 2225 |  | 
|  | 2226 | for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { | 
|  | 2227 | if (priv->eeprom.calib_info.band_info[b].ch_from == 0) | 
|  | 2228 | continue; | 
|  | 2229 |  | 
|  | 2230 | if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from) | 
|  | 2231 | && (channel <= priv->eeprom.calib_info.band_info[b].ch_to)) | 
|  | 2232 | break; | 
|  | 2233 | } | 
|  | 2234 |  | 
|  | 2235 | return b; | 
|  | 2236 | } | 
|  | 2237 |  | 
|  | 2238 | static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2) | 
|  | 2239 | { | 
|  | 2240 | s32 val; | 
|  | 2241 |  | 
|  | 2242 | if (x2 == x1) | 
|  | 2243 | return y1; | 
|  | 2244 | else { | 
|  | 2245 | iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val); | 
|  | 2246 | return val + y2; | 
|  | 2247 | } | 
|  | 2248 | } | 
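/*
 * Worked example (numbers invented for illustration): interpolating a gain
 * index for channel 40 between factory-measured channels 36 and 48 with
 * gain indexes 100 and 88 respectively gives
 *   (48 - 40) * (100 - 88) / (48 - 36) = 8, rounded, plus 88 = 96.
 * A channel closer to 36 lands closer to 100, and x2 == x1 simply returns
 * y1 to avoid dividing by zero.
 */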
|  | 2249 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 2250 | /** | 
|  | 2251 | * iwl4965_interpolate_chan - Interpolate factory measurements for one channel | 
|  | 2252 | * | 
|  | 2253 | * Interpolates factory measurements from the two sample channels within a | 
|  | 2254 | * sub-band, to apply to the channel of interest.  Interpolation is proportional to | 
|  | 2255 | * differences in channel frequencies, which is proportional to differences | 
|  | 2256 | * in channel number. | 
|  | 2257 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 2258 | static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 2259 | struct iwl4965_eeprom_calib_ch_info *chan_info) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2260 | { | 
|  | 2261 | s32 s = -1; | 
|  | 2262 | u32 c; | 
|  | 2263 | u32 m; | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 2264 | const struct iwl4965_eeprom_calib_measure *m1; | 
|  | 2265 | const struct iwl4965_eeprom_calib_measure *m2; | 
|  | 2266 | struct iwl4965_eeprom_calib_measure *omeas; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2267 | u32 ch_i1; | 
|  | 2268 | u32 ch_i2; | 
|  | 2269 |  | 
|  | 2270 | s = iwl4965_get_sub_band(priv, channel); | 
|  | 2271 | if (s >= EEPROM_TX_POWER_BANDS) { | 
|  | 2272 | IWL_ERROR("Tx Power cannot find channel %d\n", channel); | 
|  | 2273 | return -1; | 
|  | 2274 | } | 
|  | 2275 |  | 
|  | 2276 | ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num; | 
|  | 2277 | ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num; | 
|  | 2278 | chan_info->ch_num = (u8) channel; | 
|  | 2279 |  | 
|  | 2280 | IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", | 
|  | 2281 | channel, s, ch_i1, ch_i2); | 
|  | 2282 |  | 
|  | 2283 | for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { | 
|  | 2284 | for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { | 
|  | 2285 | m1 = &(priv->eeprom.calib_info.band_info[s].ch1. | 
|  | 2286 | measurements[c][m]); | 
|  | 2287 | m2 = &(priv->eeprom.calib_info.band_info[s].ch2. | 
|  | 2288 | measurements[c][m]); | 
|  | 2289 | omeas = &(chan_info->measurements[c][m]); | 
|  | 2290 |  | 
|  | 2291 | omeas->actual_pow = | 
|  | 2292 | (u8) iwl4965_interpolate_value(channel, ch_i1, | 
|  | 2293 | m1->actual_pow, | 
|  | 2294 | ch_i2, | 
|  | 2295 | m2->actual_pow); | 
|  | 2296 | omeas->gain_idx = | 
|  | 2297 | (u8) iwl4965_interpolate_value(channel, ch_i1, | 
|  | 2298 | m1->gain_idx, ch_i2, | 
|  | 2299 | m2->gain_idx); | 
|  | 2300 | omeas->temperature = | 
|  | 2301 | (u8) iwl4965_interpolate_value(channel, ch_i1, | 
|  | 2302 | m1->temperature, | 
|  | 2303 | ch_i2, | 
|  | 2304 | m2->temperature); | 
|  | 2305 | omeas->pa_det = | 
|  | 2306 | (s8) iwl4965_interpolate_value(channel, ch_i1, | 
|  | 2307 | m1->pa_det, ch_i2, | 
|  | 2308 | m2->pa_det); | 
|  | 2309 |  | 
|  | 2310 | IWL_DEBUG_TXPOWER | 
|  | 2311 | ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m, | 
|  | 2312 | m1->actual_pow, m2->actual_pow, omeas->actual_pow); | 
|  | 2313 | IWL_DEBUG_TXPOWER | 
|  | 2314 | ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m, | 
|  | 2315 | m1->gain_idx, m2->gain_idx, omeas->gain_idx); | 
|  | 2316 | IWL_DEBUG_TXPOWER | 
|  | 2317 | ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m, | 
|  | 2318 | m1->pa_det, m2->pa_det, omeas->pa_det); | 
|  | 2319 | IWL_DEBUG_TXPOWER | 
|  | 2320 | ("chain %d meas %d  T1=%d  T2=%d  T=%d\n", c, m, | 
|  | 2321 | m1->temperature, m2->temperature, | 
|  | 2322 | omeas->temperature); | 
|  | 2323 | } | 
|  | 2324 | } | 
|  | 2325 |  | 
|  | 2326 | return 0; | 
|  | 2327 | } | 
|  | 2328 |  | 
|  | 2329 | /* bit-rate-dependent table to prevent Tx distortion, in half-dB units, | 
|  | 2330 | * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 Mbit/s, and for all CCK rates. */ | 
|  | 2331 | static s32 back_off_table[] = { | 
|  | 2332 | 10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */ | 
|  | 2333 | 10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */ | 
|  | 2334 | 10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */ | 
|  | 2335 | 10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */ | 
|  | 2336 | 10			/* CCK */ | 
|  | 2337 | }; | 
|  | 2338 |  | 
|  | 2339 | /* Thermal compensation values for txpower for various frequency ranges ... | 
|  | 2340 | *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */ | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 2341 | static struct iwl4965_txpower_comp_entry { | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2342 | s32 degrees_per_05db_a; | 
|  | 2343 | s32 degrees_per_05db_a_denom; | 
|  | 2344 | } tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = { | 
|  | 2345 | {9, 2},			/* group 0 5.2, ch  34-43 */ | 
|  | 2346 | {4, 1},			/* group 1 5.2, ch  44-70 */ | 
|  | 2347 | {4, 1},			/* group 2 5.2, ch  71-124 */ | 
|  | 2348 | {4, 1},			/* group 3 5.2, ch 125-200 */ | 
|  | 2349 | {3, 1}			/* group 4 2.4, ch   all */ | 
|  | 2350 | }; | 
|  | 2351 |  | 
|  | 2352 | static s32 get_min_power_index(s32 rate_power_index, u32 band) | 
|  | 2353 | { | 
|  | 2354 | if (!band) { | 
|  | 2355 | if ((rate_power_index & 7) <= 4) | 
|  | 2356 | return MIN_TX_GAIN_INDEX_52GHZ_EXT; | 
|  | 2357 | } | 
|  | 2358 | return MIN_TX_GAIN_INDEX; | 
|  | 2359 | } | 
|  | 2360 |  | 
|  | 2361 | struct gain_entry { | 
|  | 2362 | u8 dsp; | 
|  | 2363 | u8 radio; | 
|  | 2364 | }; | 
|  | 2365 |  | 
|  | 2366 | static const struct gain_entry gain_table[2][108] = { | 
|  | 2367 | /* 5.2GHz power gain index table */ | 
|  | 2368 | { | 
|  | 2369 | {123, 0x3F},		/* highest txpower */ | 
|  | 2370 | {117, 0x3F}, | 
|  | 2371 | {110, 0x3F}, | 
|  | 2372 | {104, 0x3F}, | 
|  | 2373 | {98, 0x3F}, | 
|  | 2374 | {110, 0x3E}, | 
|  | 2375 | {104, 0x3E}, | 
|  | 2376 | {98, 0x3E}, | 
|  | 2377 | {110, 0x3D}, | 
|  | 2378 | {104, 0x3D}, | 
|  | 2379 | {98, 0x3D}, | 
|  | 2380 | {110, 0x3C}, | 
|  | 2381 | {104, 0x3C}, | 
|  | 2382 | {98, 0x3C}, | 
|  | 2383 | {110, 0x3B}, | 
|  | 2384 | {104, 0x3B}, | 
|  | 2385 | {98, 0x3B}, | 
|  | 2386 | {110, 0x3A}, | 
|  | 2387 | {104, 0x3A}, | 
|  | 2388 | {98, 0x3A}, | 
|  | 2389 | {110, 0x39}, | 
|  | 2390 | {104, 0x39}, | 
|  | 2391 | {98, 0x39}, | 
|  | 2392 | {110, 0x38}, | 
|  | 2393 | {104, 0x38}, | 
|  | 2394 | {98, 0x38}, | 
|  | 2395 | {110, 0x37}, | 
|  | 2396 | {104, 0x37}, | 
|  | 2397 | {98, 0x37}, | 
|  | 2398 | {110, 0x36}, | 
|  | 2399 | {104, 0x36}, | 
|  | 2400 | {98, 0x36}, | 
|  | 2401 | {110, 0x35}, | 
|  | 2402 | {104, 0x35}, | 
|  | 2403 | {98, 0x35}, | 
|  | 2404 | {110, 0x34}, | 
|  | 2405 | {104, 0x34}, | 
|  | 2406 | {98, 0x34}, | 
|  | 2407 | {110, 0x33}, | 
|  | 2408 | {104, 0x33}, | 
|  | 2409 | {98, 0x33}, | 
|  | 2410 | {110, 0x32}, | 
|  | 2411 | {104, 0x32}, | 
|  | 2412 | {98, 0x32}, | 
|  | 2413 | {110, 0x31}, | 
|  | 2414 | {104, 0x31}, | 
|  | 2415 | {98, 0x31}, | 
|  | 2416 | {110, 0x30}, | 
|  | 2417 | {104, 0x30}, | 
|  | 2418 | {98, 0x30}, | 
|  | 2419 | {110, 0x25}, | 
|  | 2420 | {104, 0x25}, | 
|  | 2421 | {98, 0x25}, | 
|  | 2422 | {110, 0x24}, | 
|  | 2423 | {104, 0x24}, | 
|  | 2424 | {98, 0x24}, | 
|  | 2425 | {110, 0x23}, | 
|  | 2426 | {104, 0x23}, | 
|  | 2427 | {98, 0x23}, | 
|  | 2428 | {110, 0x22}, | 
|  | 2429 | {104, 0x18}, | 
|  | 2430 | {98, 0x18}, | 
|  | 2431 | {110, 0x17}, | 
|  | 2432 | {104, 0x17}, | 
|  | 2433 | {98, 0x17}, | 
|  | 2434 | {110, 0x16}, | 
|  | 2435 | {104, 0x16}, | 
|  | 2436 | {98, 0x16}, | 
|  | 2437 | {110, 0x15}, | 
|  | 2438 | {104, 0x15}, | 
|  | 2439 | {98, 0x15}, | 
|  | 2440 | {110, 0x14}, | 
|  | 2441 | {104, 0x14}, | 
|  | 2442 | {98, 0x14}, | 
|  | 2443 | {110, 0x13}, | 
|  | 2444 | {104, 0x13}, | 
|  | 2445 | {98, 0x13}, | 
|  | 2446 | {110, 0x12}, | 
|  | 2447 | {104, 0x08}, | 
|  | 2448 | {98, 0x08}, | 
|  | 2449 | {110, 0x07}, | 
|  | 2450 | {104, 0x07}, | 
|  | 2451 | {98, 0x07}, | 
|  | 2452 | {110, 0x06}, | 
|  | 2453 | {104, 0x06}, | 
|  | 2454 | {98, 0x06}, | 
|  | 2455 | {110, 0x05}, | 
|  | 2456 | {104, 0x05}, | 
|  | 2457 | {98, 0x05}, | 
|  | 2458 | {110, 0x04}, | 
|  | 2459 | {104, 0x04}, | 
|  | 2460 | {98, 0x04}, | 
|  | 2461 | {110, 0x03}, | 
|  | 2462 | {104, 0x03}, | 
|  | 2463 | {98, 0x03}, | 
|  | 2464 | {110, 0x02}, | 
|  | 2465 | {104, 0x02}, | 
|  | 2466 | {98, 0x02}, | 
|  | 2467 | {110, 0x01}, | 
|  | 2468 | {104, 0x01}, | 
|  | 2469 | {98, 0x01}, | 
|  | 2470 | {110, 0x00}, | 
|  | 2471 | {104, 0x00}, | 
|  | 2472 | {98, 0x00}, | 
|  | 2473 | {93, 0x00}, | 
|  | 2474 | {88, 0x00}, | 
|  | 2475 | {83, 0x00}, | 
|  | 2476 | {78, 0x00}, | 
|  | 2477 | }, | 
|  | 2478 | /* 2.4GHz power gain index table */ | 
|  | 2479 | { | 
|  | 2480 | {110, 0x3f},		/* highest txpower */ | 
|  | 2481 | {104, 0x3f}, | 
|  | 2482 | {98, 0x3f}, | 
|  | 2483 | {110, 0x3e}, | 
|  | 2484 | {104, 0x3e}, | 
|  | 2485 | {98, 0x3e}, | 
|  | 2486 | {110, 0x3d}, | 
|  | 2487 | {104, 0x3d}, | 
|  | 2488 | {98, 0x3d}, | 
|  | 2489 | {110, 0x3c}, | 
|  | 2490 | {104, 0x3c}, | 
|  | 2491 | {98, 0x3c}, | 
|  | 2492 | {110, 0x3b}, | 
|  | 2493 | {104, 0x3b}, | 
|  | 2494 | {98, 0x3b}, | 
|  | 2495 | {110, 0x3a}, | 
|  | 2496 | {104, 0x3a}, | 
|  | 2497 | {98, 0x3a}, | 
|  | 2498 | {110, 0x39}, | 
|  | 2499 | {104, 0x39}, | 
|  | 2500 | {98, 0x39}, | 
|  | 2501 | {110, 0x38}, | 
|  | 2502 | {104, 0x38}, | 
|  | 2503 | {98, 0x38}, | 
|  | 2504 | {110, 0x37}, | 
|  | 2505 | {104, 0x37}, | 
|  | 2506 | {98, 0x37}, | 
|  | 2507 | {110, 0x36}, | 
|  | 2508 | {104, 0x36}, | 
|  | 2509 | {98, 0x36}, | 
|  | 2510 | {110, 0x35}, | 
|  | 2511 | {104, 0x35}, | 
|  | 2512 | {98, 0x35}, | 
|  | 2513 | {110, 0x34}, | 
|  | 2514 | {104, 0x34}, | 
|  | 2515 | {98, 0x34}, | 
|  | 2516 | {110, 0x33}, | 
|  | 2517 | {104, 0x33}, | 
|  | 2518 | {98, 0x33}, | 
|  | 2519 | {110, 0x32}, | 
|  | 2520 | {104, 0x32}, | 
|  | 2521 | {98, 0x32}, | 
|  | 2522 | {110, 0x31}, | 
|  | 2523 | {104, 0x31}, | 
|  | 2524 | {98, 0x31}, | 
|  | 2525 | {110, 0x30}, | 
|  | 2526 | {104, 0x30}, | 
|  | 2527 | {98, 0x30}, | 
|  | 2528 | {110, 0x6}, | 
|  | 2529 | {104, 0x6}, | 
|  | 2530 | {98, 0x6}, | 
|  | 2531 | {110, 0x5}, | 
|  | 2532 | {104, 0x5}, | 
|  | 2533 | {98, 0x5}, | 
|  | 2534 | {110, 0x4}, | 
|  | 2535 | {104, 0x4}, | 
|  | 2536 | {98, 0x4}, | 
|  | 2537 | {110, 0x3}, | 
|  | 2538 | {104, 0x3}, | 
|  | 2539 | {98, 0x3}, | 
|  | 2540 | {110, 0x2}, | 
|  | 2541 | {104, 0x2}, | 
|  | 2542 | {98, 0x2}, | 
|  | 2543 | {110, 0x1}, | 
|  | 2544 | {104, 0x1}, | 
|  | 2545 | {98, 0x1}, | 
|  | 2546 | {110, 0x0}, | 
|  | 2547 | {104, 0x0}, | 
|  | 2548 | {98, 0x0}, | 
|  | 2549 | {97, 0}, | 
|  | 2550 | {96, 0}, | 
|  | 2551 | {95, 0}, | 
|  | 2552 | {94, 0}, | 
|  | 2553 | {93, 0}, | 
|  | 2554 | {92, 0}, | 
|  | 2555 | {91, 0}, | 
|  | 2556 | {90, 0}, | 
|  | 2557 | {89, 0}, | 
|  | 2558 | {88, 0}, | 
|  | 2559 | {87, 0}, | 
|  | 2560 | {86, 0}, | 
|  | 2561 | {85, 0}, | 
|  | 2562 | {84, 0}, | 
|  | 2563 | {83, 0}, | 
|  | 2564 | {82, 0}, | 
|  | 2565 | {81, 0}, | 
|  | 2566 | {80, 0}, | 
|  | 2567 | {79, 0}, | 
|  | 2568 | {78, 0}, | 
|  | 2569 | {77, 0}, | 
|  | 2570 | {76, 0}, | 
|  | 2571 | {75, 0}, | 
|  | 2572 | {74, 0}, | 
|  | 2573 | {73, 0}, | 
|  | 2574 | {72, 0}, | 
|  | 2575 | {71, 0}, | 
|  | 2576 | {70, 0}, | 
|  | 2577 | {69, 0}, | 
|  | 2578 | {68, 0}, | 
|  | 2579 | {67, 0}, | 
|  | 2580 | {66, 0}, | 
|  | 2581 | {65, 0}, | 
|  | 2582 | {64, 0}, | 
|  | 2583 | {63, 0}, | 
|  | 2584 | {62, 0}, | 
|  | 2585 | {61, 0}, | 
|  | 2586 | {60, 0}, | 
|  | 2587 | {59, 0}, | 
|  | 2588 | } | 
|  | 2589 | }; | 
|  | 2590 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 2591 | static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2592 | u8 is_fat, u8 ctrl_chan_high, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 2593 | struct iwl4965_tx_power_db *tx_power_tbl) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2594 | { | 
|  | 2595 | u8 saturation_power; | 
|  | 2596 | s32 target_power; | 
|  | 2597 | s32 user_target_power; | 
|  | 2598 | s32 power_limit; | 
|  | 2599 | s32 current_temp; | 
|  | 2600 | s32 reg_limit; | 
|  | 2601 | s32 current_regulatory; | 
|  | 2602 | s32 txatten_grp = CALIB_CH_GROUP_MAX; | 
|  | 2603 | int i; | 
|  | 2604 | int c; | 
| Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 2605 | const struct iwl_channel_info *ch_info = NULL; | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 2606 | struct iwl4965_eeprom_calib_ch_info ch_eeprom_info; | 
|  | 2607 | const struct iwl4965_eeprom_calib_measure *measurement; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2608 | s16 voltage; | 
|  | 2609 | s32 init_voltage; | 
|  | 2610 | s32 voltage_compensation; | 
|  | 2611 | s32 degrees_per_05db_num; | 
|  | 2612 | s32 degrees_per_05db_denom; | 
|  | 2613 | s32 factory_temp; | 
|  | 2614 | s32 temperature_comp[2]; | 
|  | 2615 | s32 factory_gain_index[2]; | 
|  | 2616 | s32 factory_actual_pwr[2]; | 
|  | 2617 | s32 power_index; | 
|  | 2618 |  | 
|  | 2619 | /* Sanity check requested level (dBm) */ | 
|  | 2620 | if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) { | 
|  | 2621 | IWL_WARNING("Requested user TXPOWER %d below limit.\n", | 
|  | 2622 | priv->user_txpower_limit); | 
|  | 2623 | return -EINVAL; | 
|  | 2624 | } | 
|  | 2625 | if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) { | 
|  | 2626 | IWL_WARNING("Requested user TXPOWER %d above limit.\n", | 
|  | 2627 | priv->user_txpower_limit); | 
|  | 2628 | return -EINVAL; | 
|  | 2629 | } | 
|  | 2630 |  | 
|  | 2631 | /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units | 
|  | 2632 | *   are used for indexing into txpower table) */ | 
|  | 2633 | user_target_power = 2 * priv->user_txpower_limit; | 
|  | 2634 |  | 
|  | 2635 | /* Get current (RXON) channel, band, width */ | 
|  | 2636 | ch_info = | 
| Johannes Berg | 8318d78 | 2008-01-24 19:38:38 +0100 | [diff] [blame] | 2637 | iwl4965_get_channel_txpower_info(priv, priv->band, channel); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2638 |  | 
|  | 2639 | IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band, | 
|  | 2640 | is_fat); | 
|  | 2641 |  | 
|  | 2642 | if (!ch_info) | 
|  | 2643 | return -EINVAL; | 
|  | 2644 |  | 
|  | 2645 | /* get txatten group, used to select 1) thermal txpower adjustment | 
|  | 2646 | *   and 2) mimo txpower balance between Tx chains. */ | 
|  | 2647 | txatten_grp = iwl4965_get_tx_atten_grp(channel); | 
|  | 2648 | if (txatten_grp < 0) | 
|  | 2649 | return -EINVAL; | 
|  | 2650 |  | 
|  | 2651 | IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n", | 
|  | 2652 | channel, txatten_grp); | 
|  | 2653 |  | 
|  | 2654 | if (is_fat) { | 
|  | 2655 | if (ctrl_chan_high) | 
|  | 2656 | channel -= 2; | 
|  | 2657 | else | 
|  | 2658 | channel += 2; | 
|  | 2659 | } | 
|  | 2660 |  | 
|  | 2661 | /* hardware txpower limits ... | 
|  | 2662 | * saturation (clipping distortion) txpowers are in half-dBm */ | 
|  | 2663 | if (band) | 
|  | 2664 | saturation_power = priv->eeprom.calib_info.saturation_power24; | 
|  | 2665 | else | 
|  | 2666 | saturation_power = priv->eeprom.calib_info.saturation_power52; | 
|  | 2667 |  | 
|  | 2668 | if (saturation_power < IWL_TX_POWER_SATURATION_MIN || | 
|  | 2669 | saturation_power > IWL_TX_POWER_SATURATION_MAX) { | 
|  | 2670 | if (band) | 
|  | 2671 | saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24; | 
|  | 2672 | else | 
|  | 2673 | saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52; | 
|  | 2674 | } | 
|  | 2675 |  | 
|  | 2676 | /* regulatory txpower limits ... reg_limit values are in half-dBm, | 
|  | 2677 | *   max_power_avg values are in dBm, convert * 2 */ | 
|  | 2678 | if (is_fat) | 
|  | 2679 | reg_limit = ch_info->fat_max_power_avg * 2; | 
|  | 2680 | else | 
|  | 2681 | reg_limit = ch_info->max_power_avg * 2; | 
|  | 2682 |  | 
|  | 2683 | if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) || | 
|  | 2684 | (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) { | 
|  | 2685 | if (band) | 
|  | 2686 | reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24; | 
|  | 2687 | else | 
|  | 2688 | reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52; | 
|  | 2689 | } | 
|  | 2690 |  | 
|  | 2691 | /* Interpolate txpower calibration values for this channel, | 
|  | 2692 | *   based on factory calibration tests on spaced channels. */ | 
|  | 2693 | iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); | 
|  | 2694 |  | 
|  | 2695 | /* calculate tx gain adjustment based on power supply voltage */ | 
|  | 2696 | voltage = priv->eeprom.calib_info.voltage; | 
|  | 2697 | init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); | 
|  | 2698 | voltage_compensation = | 
|  | 2699 | iwl4965_get_voltage_compensation(voltage, init_voltage); | 
|  | 2700 |  | 
|  | 2701 | IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", | 
|  | 2702 | init_voltage, | 
|  | 2703 | voltage, voltage_compensation); | 
|  | 2704 |  | 
|  | 2705 | /* get current temperature (Celsius) */ | 
|  | 2706 | current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN); | 
|  | 2707 | current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX); | 
|  | 2708 | current_temp = KELVIN_TO_CELSIUS(current_temp); | 
|  | 2709 |  | 
|  | 2710 | /* select thermal txpower adjustment params, based on channel group | 
|  | 2711 | *   (same frequency group used for mimo txatten adjustment) */ | 
|  | 2712 | degrees_per_05db_num = | 
|  | 2713 | tx_power_cmp_tble[txatten_grp].degrees_per_05db_a; | 
|  | 2714 | degrees_per_05db_denom = | 
|  | 2715 | tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom; | 
|  | 2716 |  | 
|  | 2717 | /* get per-chain txpower values from factory measurements */ | 
|  | 2718 | for (c = 0; c < 2; c++) { | 
|  | 2719 | measurement = &ch_eeprom_info.measurements[c][1]; | 
|  | 2720 |  | 
|  | 2721 | /* txgain adjustment (in half-dB steps) based on difference | 
|  | 2722 | *   between factory and current temperature */ | 
|  | 2723 | factory_temp = measurement->temperature; | 
|  | 2724 | iwl4965_math_div_round((current_temp - factory_temp) * | 
|  | 2725 | degrees_per_05db_denom, | 
|  | 2726 | degrees_per_05db_num, | 
|  | 2727 | &temperature_comp[c]); | 
|  | 2728 |  | 
|  | 2729 | factory_gain_index[c] = measurement->gain_idx; | 
|  | 2730 | factory_actual_pwr[c] = measurement->actual_pow; | 
|  | 2731 |  | 
|  | 2732 | IWL_DEBUG_TXPOWER("chain = %d\n", c); | 
|  | 2733 | IWL_DEBUG_TXPOWER("fctry tmp %d, " | 
|  | 2734 | "curr tmp %d, comp %d steps\n", | 
|  | 2735 | factory_temp, current_temp, | 
|  | 2736 | temperature_comp[c]); | 
|  | 2737 |  | 
|  | 2738 | IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n", | 
|  | 2739 | factory_gain_index[c], | 
|  | 2740 | factory_actual_pwr[c]); | 
|  | 2741 | } | 
|  | 2742 |  | 
|  | 2743 | /* for each of 33 bit-rates (including 1 for CCK) */ | 
|  | 2744 | for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) { | 
|  | 2745 | u8 is_mimo_rate; | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 2746 | union iwl4965_tx_power_dual_stream tx_power; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2747 |  | 
|  | 2748 | /* for mimo, reduce each chain's txpower by half | 
|  | 2749 | * (3dB, 6 steps), so total output power is regulatory | 
|  | 2750 | * compliant. */ | 
|  | 2751 | if (i & 0x8) { | 
|  | 2752 | current_regulatory = reg_limit - | 
|  | 2753 | IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION; | 
|  | 2754 | is_mimo_rate = 1; | 
|  | 2755 | } else { | 
|  | 2756 | current_regulatory = reg_limit; | 
|  | 2757 | is_mimo_rate = 0; | 
|  | 2758 | } | 
|  | 2759 |  | 
|  | 2760 | /* find txpower limit, either hardware or regulatory */ | 
|  | 2761 | power_limit = saturation_power - back_off_table[i]; | 
|  | 2762 | if (power_limit > current_regulatory) | 
|  | 2763 | power_limit = current_regulatory; | 
|  | 2764 |  | 
|  | 2765 | /* reduce user's txpower request if necessary | 
|  | 2766 | * for this rate on this channel */ | 
|  | 2767 | target_power = user_target_power; | 
|  | 2768 | if (target_power > power_limit) | 
|  | 2769 | target_power = power_limit; | 
|  | 2770 |  | 
|  | 2771 | IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", | 
|  | 2772 | i, saturation_power - back_off_table[i], | 
|  | 2773 | current_regulatory, user_target_power, | 
|  | 2774 | target_power); | 
|  | 2775 |  | 
|  | 2776 | /* for each of 2 Tx chains (radio transmitters) */ | 
|  | 2777 | for (c = 0; c < 2; c++) { | 
|  | 2778 | s32 atten_value; | 
|  | 2779 |  | 
|  | 2780 | if (is_mimo_rate) | 
|  | 2781 | atten_value = | 
|  | 2782 | (s32)le32_to_cpu(priv->card_alive_init. | 
|  | 2783 | tx_atten[txatten_grp][c]); | 
|  | 2784 | else | 
|  | 2785 | atten_value = 0; | 
|  | 2786 |  | 
|  | 2787 | /* calculate index; higher index means lower txpower */ | 
|  | 2788 | power_index = (u8) (factory_gain_index[c] - | 
|  | 2789 | (target_power - | 
|  | 2790 | factory_actual_pwr[c]) - | 
|  | 2791 | temperature_comp[c] - | 
|  | 2792 | voltage_compensation + | 
|  | 2793 | atten_value); | 
|  | 2794 |  | 
|  | 2795 | /*			IWL_DEBUG_TXPOWER("calculated txpower index %d\n", | 
|  | 2796 | power_index); */ | 
|  | 2797 |  | 
|  | 2798 | if (power_index < get_min_power_index(i, band)) | 
|  | 2799 | power_index = get_min_power_index(i, band); | 
|  | 2800 |  | 
|  | 2801 | /* adjust 5 GHz index to support negative indexes */ | 
|  | 2802 | if (!band) | 
|  | 2803 | power_index += 9; | 
|  | 2804 |  | 
|  | 2805 | /* CCK, rate 32, reduce txpower for CCK */ | 
|  | 2806 | if (i == POWER_TABLE_CCK_ENTRY) | 
|  | 2807 | power_index += | 
|  | 2808 | IWL_TX_POWER_CCK_COMPENSATION_C_STEP; | 
|  | 2809 |  | 
|  | 2810 | /* stay within the table! */ | 
|  | 2811 | if (power_index > 107) { | 
|  | 2812 | IWL_WARNING("txpower index %d > 107\n", | 
|  | 2813 | power_index); | 
|  | 2814 | power_index = 107; | 
|  | 2815 | } | 
|  | 2816 | if (power_index < 0) { | 
|  | 2817 | IWL_WARNING("txpower index %d < 0\n", | 
|  | 2818 | power_index); | 
|  | 2819 | power_index = 0; | 
|  | 2820 | } | 
|  | 2821 |  | 
|  | 2822 | /* fill txpower command for this rate/chain */ | 
|  | 2823 | tx_power.s.radio_tx_gain[c] = | 
|  | 2824 | gain_table[band][power_index].radio; | 
|  | 2825 | tx_power.s.dsp_predis_atten[c] = | 
|  | 2826 | gain_table[band][power_index].dsp; | 
|  | 2827 |  | 
|  | 2828 | IWL_DEBUG_TXPOWER("chain %d mimo %d index %d " | 
|  | 2829 | "gain 0x%02x dsp %d\n", | 
|  | 2830 | c, atten_value, power_index, | 
|  | 2831 | tx_power.s.radio_tx_gain[c], | 
|  | 2832 | tx_power.s.dsp_predis_atten[c]); | 
|  | 2833 | }/* for each chain */ | 
|  | 2834 |  | 
|  | 2835 | tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw); | 
|  | 2836 |  | 
|  | 2837 | }/* for each rate */ | 
|  | 2838 |  | 
|  | 2839 | return 0; | 
|  | 2840 | } | 
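/*
 * Worked example for the power-index arithmetic above (all numbers invented,
 * in half-dB / half-dBm units): with factory_gain_index = 36,
 * factory_actual_pwr = 28 (14 dBm), a target of 24 (12 dBm),
 * temperature_comp = 1, voltage_compensation = 0 and atten_value = 0:
 *   index = 36 - (24 - 28) - 1 - 0 + 0 = 39.
 * A higher index means lower txpower; the result is then clamped to the
 * gain table, shifted by 9 on 5 GHz, and bumped by the CCK compensation
 * step for the CCK entry.
 */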
|  | 2841 |  | 
|  | 2842 | /** | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 2843 | * iwl4965_hw_reg_send_txpower - Configure the user TXPOWER limit | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2844 | * | 
|  | 2845 | * Uses the active RXON for channel, band, and characteristics (fat, high) | 
|  | 2846 | * The power limit is taken from priv->user_txpower_limit. | 
|  | 2847 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 2848 | int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2849 | { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 2850 | struct iwl4965_txpowertable_cmd cmd = { 0 }; | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 2851 | int ret; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2852 | u8 band = 0; | 
|  | 2853 | u8 is_fat = 0; | 
|  | 2854 | u8 ctrl_chan_high = 0; | 
|  | 2855 |  | 
|  | 2856 | if (test_bit(STATUS_SCANNING, &priv->status)) { | 
|  | 2857 | /* If this gets hit a lot, switch it to a BUG() and catch | 
|  | 2858 | * the stack trace to find out who is calling this during | 
|  | 2859 | * a scan. */ | 
|  | 2860 | IWL_WARNING("TX Power requested while scanning!\n"); | 
|  | 2861 | return -EAGAIN; | 
|  | 2862 | } | 
|  | 2863 |  | 
| Johannes Berg | 8318d78 | 2008-01-24 19:38:38 +0100 | [diff] [blame] | 2864 | band = priv->band == IEEE80211_BAND_2GHZ; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2865 |  | 
|  | 2866 | is_fat = is_fat_channel(priv->active_rxon.flags); | 
|  | 2867 |  | 
|  | 2868 | if (is_fat && | 
|  | 2869 | (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) | 
|  | 2870 | ctrl_chan_high = 1; | 
|  | 2871 |  | 
|  | 2872 | cmd.band = band; | 
|  | 2873 | cmd.channel = priv->active_rxon.channel; | 
|  | 2874 |  | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 2875 | ret = iwl4965_fill_txpower_tbl(priv, band, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2876 | le16_to_cpu(priv->active_rxon.channel), | 
|  | 2877 | is_fat, ctrl_chan_high, &cmd.tx_power); | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 2878 | if (ret) | 
|  | 2879 | goto out; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2880 |  | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 2881 | ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd); | 
|  | 2882 |  | 
|  | 2883 | out: | 
|  | 2884 | return ret; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2885 | } | 
|  | 2886 |  | 
| Tomas Winkler | 7e8c519 | 2008-04-15 16:01:43 -0700 | [diff] [blame] | 2887 | static int iwl4965_send_rxon_assoc(struct iwl_priv *priv) | 
|  | 2888 | { | 
|  | 2889 | int ret = 0; | 
|  | 2890 | struct iwl4965_rxon_assoc_cmd rxon_assoc; | 
|  | 2891 | const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon; | 
|  | 2892 | const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon; | 
|  | 2893 |  | 
|  | 2894 | if ((rxon1->flags == rxon2->flags) && | 
|  | 2895 | (rxon1->filter_flags == rxon2->filter_flags) && | 
|  | 2896 | (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && | 
|  | 2897 | (rxon1->ofdm_ht_single_stream_basic_rates == | 
|  | 2898 | rxon2->ofdm_ht_single_stream_basic_rates) && | 
|  | 2899 | (rxon1->ofdm_ht_dual_stream_basic_rates == | 
|  | 2900 | rxon2->ofdm_ht_dual_stream_basic_rates) && | 
|  | 2901 | (rxon1->rx_chain == rxon2->rx_chain) && | 
|  | 2902 | (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { | 
|  | 2903 | IWL_DEBUG_INFO("Using current RXON_ASSOC.  Not resending.\n"); | 
|  | 2904 | return 0; | 
|  | 2905 | } | 
|  | 2906 |  | 
|  | 2907 | rxon_assoc.flags = priv->staging_rxon.flags; | 
|  | 2908 | rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; | 
|  | 2909 | rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; | 
|  | 2910 | rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; | 
|  | 2911 | rxon_assoc.reserved = 0; | 
|  | 2912 | rxon_assoc.ofdm_ht_single_stream_basic_rates = | 
|  | 2913 | priv->staging_rxon.ofdm_ht_single_stream_basic_rates; | 
|  | 2914 | rxon_assoc.ofdm_ht_dual_stream_basic_rates = | 
|  | 2915 | priv->staging_rxon.ofdm_ht_dual_stream_basic_rates; | 
|  | 2916 | rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain; | 
|  | 2917 |  | 
|  | 2918 | ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, | 
|  | 2919 | sizeof(rxon_assoc), &rxon_assoc, NULL); | 
|  | 2920 | return ret; | 
|  | 2924 | } | 
|  | 2925 |  | 
|  | 2926 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 2927 | int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2928 | { | 
|  | 2929 | int rc; | 
|  | 2930 | u8 band = 0; | 
|  | 2931 | u8 is_fat = 0; | 
|  | 2932 | u8 ctrl_chan_high = 0; | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 2933 | struct iwl4965_channel_switch_cmd cmd = { 0 }; | 
| Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 2934 | const struct iwl_channel_info *ch_info; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2935 |  | 
| Johannes Berg | 8318d78 | 2008-01-24 19:38:38 +0100 | [diff] [blame] | 2936 | band = priv->band == IEEE80211_BAND_2GHZ; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2937 |  | 
| Assaf Krauss | 8622e70 | 2008-03-21 13:53:43 -0700 | [diff] [blame] | 2938 | ch_info = iwl_get_channel_info(priv, priv->band, channel); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2939 |  | 
|  | 2940 | is_fat = is_fat_channel(priv->staging_rxon.flags); | 
|  | 2941 |  | 
|  | 2942 | if (is_fat && | 
|  | 2943 | (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) | 
|  | 2944 | ctrl_chan_high = 1; | 
|  | 2945 |  | 
|  | 2946 | cmd.band = band; | 
|  | 2947 | cmd.expect_beacon = 0; | 
|  | 2948 | cmd.channel = cpu_to_le16(channel); | 
|  | 2949 | cmd.rxon_flags = priv->active_rxon.flags; | 
|  | 2950 | cmd.rxon_filter_flags = priv->active_rxon.filter_flags; | 
|  | 2951 | cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); | 
|  | 2952 | if (ch_info) | 
|  | 2953 | cmd.expect_beacon = is_channel_radar(ch_info); | 
|  | 2954 | else | 
|  | 2955 | cmd.expect_beacon = 1; | 
|  | 2956 |  | 
|  | 2957 | rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat, | 
|  | 2958 | ctrl_chan_high, &cmd.tx_power); | 
|  | 2959 | if (rc) { | 
|  | 2960 | IWL_DEBUG_11H("error:%d  fill txpower_tbl\n", rc); | 
|  | 2961 | return rc; | 
|  | 2962 | } | 
|  | 2963 |  | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 2964 | rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2965 | return rc; | 
|  | 2966 | } | 
|  | 2967 |  | 
|  | 2968 | #define RTS_HCCA_RETRY_LIMIT		3 | 
|  | 2969 | #define RTS_DFAULT_RETRY_LIMIT		60 | 
|  | 2970 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 2971 | void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv, | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 2972 | struct iwl_cmd *cmd, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2973 | struct ieee80211_tx_control *ctrl, | 
|  | 2974 | struct ieee80211_hdr *hdr, int sta_id, | 
|  | 2975 | int is_hcca) | 
|  | 2976 | { | 
| Tomas Winkler | 87e4f7d | 2008-01-14 17:46:16 -0800 | [diff] [blame] | 2977 | struct iwl4965_tx_cmd *tx = &cmd->cmd.tx; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2978 | u8 rts_retry_limit = 0; | 
|  | 2979 | u8 data_retry_limit = 0; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2980 | u16 fc = le16_to_cpu(hdr->frame_control); | 
| Tomas Winkler | 87e4f7d | 2008-01-14 17:46:16 -0800 | [diff] [blame] | 2981 | u8 rate_plcp; | 
|  | 2982 | u16 rate_flags = 0; | 
| Johannes Berg | 8318d78 | 2008-01-24 19:38:38 +0100 | [diff] [blame] | 2983 | int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2984 |  | 
| Tomas Winkler | 87e4f7d | 2008-01-14 17:46:16 -0800 | [diff] [blame] | 2985 | rate_plcp = iwl4965_rates[rate_idx].plcp; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2986 |  | 
|  | 2987 | rts_retry_limit = (is_hcca) ? | 
|  | 2988 | RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT; | 
|  | 2989 |  | 
| Tomas Winkler | 87e4f7d | 2008-01-14 17:46:16 -0800 | [diff] [blame] | 2990 | if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) | 
|  | 2991 | rate_flags |= RATE_MCS_CCK_MSK; | 
|  | 2992 |  | 
|  | 2993 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 2994 | if (ieee80211_is_probe_response(fc)) { | 
|  | 2995 | data_retry_limit = 3; | 
|  | 2996 | if (data_retry_limit < rts_retry_limit) | 
|  | 2997 | rts_retry_limit = data_retry_limit; | 
|  | 2998 | } else | 
|  | 2999 | data_retry_limit = IWL_DEFAULT_TX_RETRY; | 
|  | 3000 |  | 
|  | 3001 | if (priv->data_retry_limit != -1) | 
|  | 3002 | data_retry_limit = priv->data_retry_limit; | 
|  | 3003 |  | 
| Tomas Winkler | 87e4f7d | 2008-01-14 17:46:16 -0800 | [diff] [blame] | 3004 |  | 
|  | 3005 | if (ieee80211_is_data(fc)) { | 
|  | 3006 | tx->initial_rate_index = 0; | 
|  | 3007 | tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; | 
|  | 3008 | } else { | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3009 | switch (fc & IEEE80211_FCTL_STYPE) { | 
|  | 3010 | case IEEE80211_STYPE_AUTH: | 
|  | 3011 | case IEEE80211_STYPE_DEAUTH: | 
|  | 3012 | case IEEE80211_STYPE_ASSOC_REQ: | 
|  | 3013 | case IEEE80211_STYPE_REASSOC_REQ: | 
| Tomas Winkler | 87e4f7d | 2008-01-14 17:46:16 -0800 | [diff] [blame] | 3014 | if (tx->tx_flags & TX_CMD_FLG_RTS_MSK) { | 
|  | 3015 | tx->tx_flags &= ~TX_CMD_FLG_RTS_MSK; | 
|  | 3016 | tx->tx_flags |= TX_CMD_FLG_CTS_MSK; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3017 | } | 
|  | 3018 | break; | 
|  | 3019 | default: | 
|  | 3020 | break; | 
|  | 3021 | } | 
| Tomas Winkler | 87e4f7d | 2008-01-14 17:46:16 -0800 | [diff] [blame] | 3022 |  | 
|  | 3023 | /* Alternate between antenna A and B for successive frames */ | 
|  | 3024 | if (priv->use_ant_b_for_management_frame) { | 
|  | 3025 | priv->use_ant_b_for_management_frame = 0; | 
|  | 3026 | rate_flags |= RATE_MCS_ANT_B_MSK; | 
|  | 3027 | } else { | 
|  | 3028 | priv->use_ant_b_for_management_frame = 1; | 
|  | 3029 | rate_flags |= RATE_MCS_ANT_A_MSK; | 
|  | 3030 | } | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3031 | } | 
|  | 3032 |  | 
| Tomas Winkler | 87e4f7d | 2008-01-14 17:46:16 -0800 | [diff] [blame] | 3033 | tx->rts_retry_limit = rts_retry_limit; | 
|  | 3034 | tx->data_retry_limit = data_retry_limit; | 
|  | 3035 | tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3036 | } | 
|  | 3037 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3038 | int iwl4965_hw_get_rx_read(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3039 | { | 
| Tomas Winkler | 059ff82 | 2008-04-14 21:16:14 -0700 | [diff] [blame] | 3040 | struct iwl4965_shared *s = priv->shared_virt; | 
|  | 3041 | return le32_to_cpu(s->rb_closed) & 0xFFF; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3042 | } | 
|  | 3043 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3044 | int iwl4965_hw_get_temperature(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3045 | { | 
|  | 3046 | return priv->temperature; | 
|  | 3047 | } | 
|  | 3048 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3049 | unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3050 | struct iwl4965_frame *frame, u8 rate) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3051 | { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3052 | struct iwl4965_tx_beacon_cmd *tx_beacon_cmd; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3053 | unsigned int frame_size; | 
|  | 3054 |  | 
|  | 3055 | tx_beacon_cmd = &frame->u.beacon; | 
|  | 3056 | memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); | 
|  | 3057 |  | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 3058 | tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3059 | tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | 
|  | 3060 |  | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3061 | frame_size = iwl4965_fill_beacon_frame(priv, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3062 | tx_beacon_cmd->frame, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3063 | iwl4965_broadcast_addr, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3064 | sizeof(frame->u) - sizeof(*tx_beacon_cmd)); | 
|  | 3065 |  | 
|  | 3066 | BUG_ON(frame_size > MAX_MPDU_SIZE); | 
|  | 3067 | tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); | 
|  | 3068 |  | 
|  | 3069 | if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP)) | 
|  | 3070 | tx_beacon_cmd->tx.rate_n_flags = | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3071 | iwl4965_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3072 | else | 
|  | 3073 | tx_beacon_cmd->tx.rate_n_flags = | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3074 | iwl4965_hw_set_rate_n_flags(rate, 0); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3075 |  | 
|  | 3076 | tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK | | 
|  | 3077 | TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK); | 
|  | 3078 | return (sizeof(*tx_beacon_cmd) + frame_size); | 
|  | 3079 | } | 
|  | 3080 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 3081 | /* | 
|  | 3082 | * Tell 4965 where to find circular buffer of Tx Frame Descriptors for | 
|  | 3083 | * given Tx queue, and enable the DMA channel used for that queue. | 
|  | 3084 | * | 
|  | 3085 | * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA | 
|  | 3086 | * channels supported in hardware. | 
|  | 3087 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3088 | int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, struct iwl4965_tx_queue *txq) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3089 | { | 
|  | 3090 | int rc; | 
|  | 3091 | unsigned long flags; | 
|  | 3092 | int txq_id = txq->q.id; | 
|  | 3093 |  | 
|  | 3094 | spin_lock_irqsave(&priv->lock, flags); | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 3095 | rc = iwl_grab_nic_access(priv); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3096 | if (rc) { | 
|  | 3097 | spin_unlock_irqrestore(&priv->lock, flags); | 
|  | 3098 | return rc; | 
|  | 3099 | } | 
|  | 3100 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 3101 | /* Circular buffer (TFD queue in DRAM) physical base address */ | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 3102 | iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3103 | txq->q.dma_addr >> 8); | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 3104 |  | 
|  | 3105 | /* Enable DMA channel, using same id as for TFD queue */ | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 3106 | iwl_write_direct32( | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3107 | priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), | 
|  | 3108 | IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | | 
|  | 3109 | IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL); | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 3110 | iwl_release_nic_access(priv); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3111 | spin_unlock_irqrestore(&priv->lock, flags); | 
|  | 3112 |  | 
|  | 3113 | return 0; | 
|  | 3114 | } | 
|  | 3115 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3116 | int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3117 | dma_addr_t addr, u16 len) | 
|  | 3118 | { | 
|  | 3119 | int index, is_odd; | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3120 | struct iwl4965_tfd_frame *tfd = ptr; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3121 | u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs); | 
|  | 3122 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 3123 | /* Each TFD can point to a maximum of 20 Tx buffers */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3124 | if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) { | 
|  | 3125 | IWL_ERROR("Error: cannot send more than %d chunks\n", | 
|  | 3126 | MAX_NUM_OF_TBS); | 
|  | 3127 | return -EINVAL; | 
|  | 3128 | } | 
|  | 3129 |  | 
|  | 3130 | index = num_tbs / 2; | 
|  | 3131 | is_odd = num_tbs & 0x1; | 
|  | 3132 |  | 
|  | 3133 | if (!is_odd) { | 
|  | 3134 | tfd->pa[index].tb1_addr = cpu_to_le32(addr); | 
|  | 3135 | IWL_SET_BITS(tfd->pa[index], tb1_addr_hi, | 
| Tomas Winkler | 6a218f6 | 2008-01-14 17:46:15 -0800 | [diff] [blame] | 3136 | iwl_get_dma_hi_address(addr)); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3137 | IWL_SET_BITS(tfd->pa[index], tb1_len, len); | 
|  | 3138 | } else { | 
|  | 3139 | IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16, | 
|  | 3140 | (u32) (addr & 0xffff)); | 
|  | 3141 | IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16); | 
|  | 3142 | IWL_SET_BITS(tfd->pa[index], tb2_len, len); | 
|  | 3143 | } | 
|  | 3144 |  | 
|  | 3145 | IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1); | 
|  | 3146 |  | 
|  | 3147 | return 0; | 
|  | 3148 | } | 
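/*
 * Illustrative usage sketch (not from the driver; the DMA handles and
 * lengths are hypothetical): attaching a two-chunk frame to one TFD puts
 * the even chunk in pa[0].tb1_* and the odd chunk in pa[0].tb2_*.
 */
#if 0	/* example only, never compiled */
	iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_dma, txcmd_len);
	iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, payload_dma, payload_len);
	/* num_tbs is now 2; both chunks share the pa[0] slot of this TFD */
#endif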
|  | 3149 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3150 | static void iwl4965_hw_card_show_info(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3151 | { | 
|  | 3152 | u16 hw_version = priv->eeprom.board_revision_4965; | 
|  | 3153 |  | 
|  | 3154 | IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n", | 
|  | 3155 | ((hw_version >> 8) & 0x0F), | 
|  | 3156 | ((hw_version >> 8) >> 4), (hw_version & 0x00FF)); | 
|  | 3157 |  | 
|  | 3158 | IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n", | 
|  | 3159 | priv->eeprom.board_pba_number_4965); | 
|  | 3160 | } | 
|  | 3161 |  | 
|  | 3162 | #define IWL_TX_CRC_SIZE		4 | 
|  | 3163 | #define IWL_TX_DELIMITER_SIZE	4 | 
|  | 3164 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 3165 | /** | 
| Tomas Winkler | e2a722e | 2008-04-14 21:16:10 -0700 | [diff] [blame] | 3166 | * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 3167 | */ | 
| Tomas Winkler | e2a722e | 2008-04-14 21:16:10 -0700 | [diff] [blame] | 3168 | static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv, | 
|  | 3169 | struct iwl4965_tx_queue *txq, | 
|  | 3170 | u16 byte_cnt) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3171 | { | 
|  | 3172 | int len; | 
|  | 3173 | int txq_id = txq->q.id; | 
| Tomas Winkler | 059ff82 | 2008-04-14 21:16:14 -0700 | [diff] [blame] | 3174 | struct iwl4965_shared *shared_data = priv->shared_virt; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3175 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3176 | len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; | 
|  | 3177 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 3178 | /* Set up byte count within first 256 entries */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3179 | IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. | 
| Tomas Winkler | fc4b685 | 2007-10-25 17:15:24 +0800 | [diff] [blame] | 3180 | tfd_offset[txq->q.write_ptr], byte_cnt, len); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3181 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 3182 | /* If within first 64 entries, duplicate at end */ | 
| Tomas Winkler | fc4b685 | 2007-10-25 17:15:24 +0800 | [diff] [blame] | 3183 | if (txq->q.write_ptr < IWL4965_MAX_WIN_SIZE) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3184 | IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. | 
| Tomas Winkler | fc4b685 | 2007-10-25 17:15:24 +0800 | [diff] [blame] | 3185 | tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr], | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3186 | byte_cnt, len); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3187 | } | 
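The duplication above is easier to see in isolation: the byte-count table has room for the queue plus an extra copy of its first window, so entries written near the head are mirrored past the tail and a read window that wraps around stays contiguous. A minimal sketch under assumed sizes (256-entry queue, 64-entry window; the real values come from IWL4965_QUEUE_SIZE and IWL4965_MAX_WIN_SIZE):

#include <stdio.h>

#define QUEUE_SIZE 256			/* assumed for illustration */
#define MAX_WIN     64			/* assumed for illustration */

static unsigned short byte_cnt_tbl[QUEUE_SIZE + MAX_WIN];

static void set_byte_cnt(int write_ptr, unsigned short len)
{
	byte_cnt_tbl[write_ptr] = len;
	if (write_ptr < MAX_WIN)	/* mirror the head entries ... */
		byte_cnt_tbl[QUEUE_SIZE + write_ptr] = len;	/* ... after the tail */
}

int main(void)
{
	set_byte_cnt(3, 1500 + 4 + 4);	/* payload + CRC + delimiter */
	printf("%d %d\n", byte_cnt_tbl[3], byte_cnt_tbl[QUEUE_SIZE + 3]);
	return 0;
}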
|  | 3188 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 3189 | /** | 
|  | 3190 | * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image | 
|  | 3191 | * | 
|  | 3192 | * Selects how many and which Rx receivers/antennas/chains to use. | 
|  | 3193 | * This should not be used for the scan command ... it puts data in the wrong place. | 
|  | 3194 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3195 | void iwl4965_set_rxon_chain(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3196 | { | 
|  | 3197 | u8 is_single = is_single_stream(priv); | 
|  | 3198 | u8 idle_state, rx_state; | 
|  | 3199 |  | 
|  | 3200 | priv->staging_rxon.rx_chain = 0; | 
|  | 3201 | rx_state = idle_state = 3; | 
|  | 3202 |  | 
|  | 3203 | /* Tell uCode which antennas are actually connected. | 
|  | 3204 | * Before first association, we assume all antennas are connected. | 
|  | 3205 | * Just after first association, iwl4965_noise_calibration() | 
|  | 3206 | *    checks which antennas actually *are* connected. */ | 
|  | 3207 | priv->staging_rxon.rx_chain |= | 
|  | 3208 | cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS); | 
|  | 3209 |  | 
|  | 3210 | /* How many receivers should we use? */ | 
|  | 3211 | iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state); | 
|  | 3212 | priv->staging_rxon.rx_chain |= | 
|  | 3213 | cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS); | 
|  | 3214 | priv->staging_rxon.rx_chain |= | 
|  | 3215 | cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS); | 
|  | 3216 |  | 
|  | 3217 | if (!is_single && (rx_state >= 2) && | 
|  | 3218 | !test_bit(STATUS_POWER_PMI, &priv->status)) | 
|  | 3219 | priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; | 
|  | 3220 | else | 
|  | 3221 | priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; | 
|  | 3222 |  | 
|  | 3223 | IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain); | 
|  | 3224 | } | 
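For reference, the rx_chain field built above is a single 16-bit bitfield holding the valid-antenna mask, the receiver counts and a force-MIMO flag. A rough standalone sketch of that packing; the shift positions below are placeholders chosen for illustration, the real ones are the RXON_RX_CHAIN_* definitions in the command headers:

#include <stdio.h>

#define VALID_POS       1		/* placeholder position */
#define CNT_POS        10		/* placeholder position */
#define MIMO_CNT_POS   12		/* placeholder position */
#define MIMO_FORCE_BIT (1 << 14)	/* placeholder bit */

int main(void)
{
	unsigned short rx_chain = 0;
	unsigned char valid_antenna = 0x7;	/* assume all 3 connected */
	unsigned char rx_cnt = 2, idle_cnt = 2;

	rx_chain |= valid_antenna << VALID_POS;
	rx_chain |= rx_cnt << MIMO_CNT_POS;
	rx_chain |= idle_cnt << CNT_POS;
	if (rx_cnt >= 2)			/* MIMO wanted and allowed */
		rx_chain |= MIMO_FORCE_BIT;

	printf("rx_chain = 0x%04x\n", (unsigned int)rx_chain);
	return 0;
}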
|  | 3225 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3226 | /** | 
|  | 3227 | * sign_extend - Sign extend a value using specified bit as sign-bit | 
|  | 3228 | * | 
|  | 3229 | * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1 | 
|  | 3230 | * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7. | 
|  | 3231 | * | 
|  | 3232 | * @param oper value to sign extend | 
|  | 3233 | * @param index 0 based bit index (0<=index<32) to sign bit | 
|  | 3234 | */ | 
|  | 3235 | static s32 sign_extend(u32 oper, int index) | 
|  | 3236 | { | 
|  | 3237 | u8 shift = 31 - index; | 
|  | 3238 |  | 
|  | 3239 | return (s32)(oper << shift) >> shift; | 
|  | 3240 | } | 
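A quick standalone check makes the doc comment above concrete: with index 3 the value 9 (1001b) is read as a 4-bit two's-complement number, i.e. -7, and with index 23 a raw 24-bit temperature word from the uCode is widened to a signed 32-bit value.

#include <stdio.h>
#include <stdint.h>

static int32_t sign_extend(uint32_t oper, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(oper << shift) >> shift;
}

int main(void)
{
	printf("%d\n", (int)sign_extend(9, 3));		/* prints -7 */
	printf("%d\n", (int)sign_extend(0x800001, 23));	/* prints -8388607 */
	return 0;
}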
|  | 3241 |  | 
|  | 3242 | /** | 
|  | 3243 | * iwl4965_get_temperature - return the calibrated temperature (in Kelvin) | 
|  | 3244 | * @statistics: Provides the temperature reading from the uCode | 
|  | 3245 | * | 
|  | 3246 | * A return of <0 indicates bogus data in the statistics | 
|  | 3247 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3248 | int iwl4965_get_temperature(const struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3249 | { | 
|  | 3250 | s32 temperature; | 
|  | 3251 | s32 vt; | 
|  | 3252 | s32 R1, R2, R3; | 
|  | 3253 | u32 R4; | 
|  | 3254 |  | 
|  | 3255 | if (test_bit(STATUS_TEMPERATURE, &priv->status) && | 
|  | 3256 | (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) { | 
|  | 3257 | IWL_DEBUG_TEMP("Running FAT temperature calibration\n"); | 
|  | 3258 | R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); | 
|  | 3259 | R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]); | 
|  | 3260 | R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]); | 
|  | 3261 | R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]); | 
|  | 3262 | } else { | 
|  | 3263 | IWL_DEBUG_TEMP("Running temperature calibration\n"); | 
|  | 3264 | R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]); | 
|  | 3265 | R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]); | 
|  | 3266 | R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]); | 
|  | 3267 | R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]); | 
|  | 3268 | } | 
|  | 3269 |  | 
|  | 3270 | /* | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 3271 | * Temperature is only 23 bits, so sign extend out to 32. | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3272 | * | 
|  | 3273 | * NOTE If we haven't received a statistics notification yet | 
|  | 3274 | * with an updated temperature, use R4 provided to us in the | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 3275 | * "initialize" ALIVE response. | 
|  | 3276 | */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3277 | if (!test_bit(STATUS_TEMPERATURE, &priv->status)) | 
|  | 3278 | vt = sign_extend(R4, 23); | 
|  | 3279 | else | 
|  | 3280 | vt = sign_extend( | 
|  | 3281 | le32_to_cpu(priv->statistics.general.temperature), 23); | 
|  | 3282 |  | 
|  | 3283 | IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", | 
|  | 3284 | R1, R2, R3, vt); | 
|  | 3285 |  | 
|  | 3286 | if (R3 == R1) { | 
|  | 3287 | IWL_ERROR("Calibration conflict R1 == R3\n"); | 
|  | 3288 | return -1; | 
|  | 3289 | } | 
|  | 3290 |  | 
|  | 3291 | /* Calculate temperature in kelvin, adjust by 97%. | 
|  | 3292 | * Add offset to center the adjustment around 0 degrees Centigrade. */ | 
|  | 3293 | temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2); | 
|  | 3294 | temperature /= (R3 - R1); | 
|  | 3295 | temperature = (temperature * 97) / 100 + | 
|  | 3296 | TEMPERATURE_CALIB_KELVIN_OFFSET; | 
|  | 3297 |  | 
|  | 3298 | IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature, | 
|  | 3299 | KELVIN_TO_CELSIUS(temperature)); | 
|  | 3300 |  | 
|  | 3301 | return temperature; | 
|  | 3302 | } | 
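A worked run of the arithmetic above, standalone and with made-up numbers; the real R1..R4 arrive in the "initialize" ALIVE response and the two calibration constants are driver #defines, so every value below is purely illustrative:

#include <stdio.h>

#define CALIB_A_VAL       259	/* assumed stand-in for TEMPERATURE_CALIB_A_VAL */
#define CALIB_KELVIN_OFF    8	/* assumed stand-in for the Kelvin offset */

int main(void)
{
	int R1 = -20, R2 = 100, R3 = 80, vt = 200;	/* made-up calibration data */
	int temp;

	temp = CALIB_A_VAL * (vt - R2);			/* scale delta from R2 */
	temp /= (R3 - R1);				/* normalize by R3 - R1 */
	temp = (temp * 97) / 100 + CALIB_KELVIN_OFF;	/* 97% plus offset */

	printf("calibrated temperature: %d K\n", temp);	/* 259 K for these inputs */
	return 0;
}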
|  | 3303 |  | 
|  | 3304 | /* Adjust Txpower only if temperature variance is greater than threshold. */ | 
|  | 3305 | #define IWL_TEMPERATURE_THRESHOLD   3 | 
|  | 3306 |  | 
|  | 3307 | /** | 
|  | 3308 | * iwl4965_is_temp_calib_needed - determines if new calibration is needed | 
|  | 3309 | * | 
|  | 3310 | * If the temperature has changed sufficiently, then a recalibration | 
|  | 3311 | * is needed. | 
|  | 3312 | * | 
|  | 3313 | * Assumes caller will replace priv->last_temperature once calibration | 
|  | 3314 | * executed. | 
|  | 3315 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3316 | static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3317 | { | 
|  | 3318 | int temp_diff; | 
|  | 3319 |  | 
|  | 3320 | if (!test_bit(STATUS_STATISTICS, &priv->status)) { | 
|  | 3321 | IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n"); | 
|  | 3322 | return 0; | 
|  | 3323 | } | 
|  | 3324 |  | 
|  | 3325 | temp_diff = priv->temperature - priv->last_temperature; | 
|  | 3326 |  | 
|  | 3327 | /* get absolute value */ | 
|  | 3328 | if (temp_diff < 0) { | 
|  | 3329 | IWL_DEBUG_POWER("Getting cooler, delta %d\n", temp_diff); | 
|  | 3330 | temp_diff = -temp_diff; | 
|  | 3331 | } else if (temp_diff == 0) | 
|  | 3332 | IWL_DEBUG_POWER("Same temp\n"); | 
|  | 3333 | else | 
|  | 3334 | IWL_DEBUG_POWER("Getting warmer, delta %d\n", temp_diff); | 
|  | 3335 |  | 
|  | 3336 | if (temp_diff < IWL_TEMPERATURE_THRESHOLD) { | 
|  | 3337 | IWL_DEBUG_POWER("Thermal txpower calib not needed\n"); | 
|  | 3338 | return 0; | 
|  | 3339 | } | 
|  | 3340 |  | 
|  | 3341 | IWL_DEBUG_POWER("Thermal txpower calib needed\n"); | 
|  | 3342 |  | 
|  | 3343 | return 1; | 
|  | 3344 | } | 
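Stripped of the debug output, the decision above is just an absolute-difference test against the threshold; a minimal standalone equivalent for illustration:

#include <stdio.h>
#include <stdlib.h>

#define TEMP_THRESHOLD 3	/* same role as IWL_TEMPERATURE_THRESHOLD */

static int temp_calib_needed(int current_temp, int last_temp)
{
	return abs(current_temp - last_temp) >= TEMP_THRESHOLD;
}

int main(void)
{
	printf("%d %d\n", temp_calib_needed(300, 298),	/* 0: delta 2 */
			  temp_calib_needed(300, 295));	/* 1: delta 5 */
	return 0;
}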
|  | 3345 |  | 
|  | 3346 | /* Calculate noise level, based on measurements during network silence just | 
|  | 3347 | *   before arriving beacon.  This measurement can be done only if we know | 
|  | 3348 | *   exactly when to expect beacons, therefore only when we're associated. */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3349 | static void iwl4965_rx_calc_noise(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3350 | { | 
|  | 3351 | struct statistics_rx_non_phy *rx_info | 
|  | 3352 | = &(priv->statistics.rx.general); | 
|  | 3353 | int num_active_rx = 0; | 
|  | 3354 | int total_silence = 0; | 
|  | 3355 | int bcn_silence_a = | 
|  | 3356 | le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; | 
|  | 3357 | int bcn_silence_b = | 
|  | 3358 | le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; | 
|  | 3359 | int bcn_silence_c = | 
|  | 3360 | le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; | 
|  | 3361 |  | 
|  | 3362 | if (bcn_silence_a) { | 
|  | 3363 | total_silence += bcn_silence_a; | 
|  | 3364 | num_active_rx++; | 
|  | 3365 | } | 
|  | 3366 | if (bcn_silence_b) { | 
|  | 3367 | total_silence += bcn_silence_b; | 
|  | 3368 | num_active_rx++; | 
|  | 3369 | } | 
|  | 3370 | if (bcn_silence_c) { | 
|  | 3371 | total_silence += bcn_silence_c; | 
|  | 3372 | num_active_rx++; | 
|  | 3373 | } | 
|  | 3374 |  | 
|  | 3375 | /* Average among active antennas */ | 
|  | 3376 | if (num_active_rx) | 
|  | 3377 | priv->last_rx_noise = (total_silence / num_active_rx) - 107; | 
|  | 3378 | else | 
|  | 3379 | priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; | 
|  | 3380 |  | 
|  | 3381 | IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", | 
|  | 3382 | bcn_silence_a, bcn_silence_b, bcn_silence_c, | 
|  | 3383 | priv->last_rx_noise); | 
|  | 3384 | } | 
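A standalone illustration of the averaging above: only chains that reported a non-zero beacon-silence value are counted, and the average is turned into dBm with the same fixed 107 offset. The readings and the not-available placeholder are made up for the example:

#include <stdio.h>

#define NOISE_NOT_AVAILABLE (-127)	/* placeholder for the driver constant */

int main(void)
{
	int silence[3] = { 40, 0, 46 };	/* per-chain beacon silence, made up */
	int total = 0, active = 0, noise, i;

	for (i = 0; i < 3; i++) {
		if (silence[i]) {	/* chain B reported nothing here */
			total += silence[i];
			active++;
		}
	}

	noise = active ? (total / active) - 107 : NOISE_NOT_AVAILABLE;
	printf("estimated noise: %d dBm\n", noise);	/* (40+46)/2 - 107 = -64 */
	return 0;
}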
|  | 3385 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3386 | void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3387 | { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3388 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3389 | int change; | 
|  | 3390 | s32 temp; | 
|  | 3391 |  | 
|  | 3392 | IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n", | 
|  | 3393 | (int)sizeof(priv->statistics), pkt->len); | 
|  | 3394 |  | 
|  | 3395 | change = ((priv->statistics.general.temperature != | 
|  | 3396 | pkt->u.stats.general.temperature) || | 
|  | 3397 | ((priv->statistics.flag & | 
|  | 3398 | STATISTICS_REPLY_FLG_FAT_MODE_MSK) != | 
|  | 3399 | (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK))); | 
|  | 3400 |  | 
|  | 3401 | memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); | 
|  | 3402 |  | 
|  | 3403 | set_bit(STATUS_STATISTICS, &priv->status); | 
|  | 3404 |  | 
|  | 3405 | /* Reschedule the statistics timer to occur in | 
|  | 3406 | * REG_RECALIB_PERIOD seconds to ensure we get a | 
|  | 3407 | * thermal update even if the uCode doesn't give | 
|  | 3408 | * us one */ | 
|  | 3409 | mod_timer(&priv->statistics_periodic, jiffies + | 
|  | 3410 | msecs_to_jiffies(REG_RECALIB_PERIOD * 1000)); | 
|  | 3411 |  | 
|  | 3412 | if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && | 
|  | 3413 | (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { | 
|  | 3414 | iwl4965_rx_calc_noise(priv); | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 3415 | #ifdef CONFIG_IWL4965_SENSITIVITY | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3416 | queue_work(priv->workqueue, &priv->sensitivity_work); | 
|  | 3417 | #endif | 
|  | 3418 | } | 
|  | 3419 |  | 
| Mohamed Abbas | ab53d8a | 2008-03-25 16:33:36 -0700 | [diff] [blame] | 3420 | iwl_leds_background(priv); | 
|  | 3421 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3422 | /* If the hardware hasn't reported a change in | 
|  | 3423 | * temperature then don't bother computing a | 
|  | 3424 | * calibrated temperature value */ | 
|  | 3425 | if (!change) | 
|  | 3426 | return; | 
|  | 3427 |  | 
|  | 3428 | temp = iwl4965_get_temperature(priv); | 
|  | 3429 | if (temp < 0) | 
|  | 3430 | return; | 
|  | 3431 |  | 
|  | 3432 | if (priv->temperature != temp) { | 
|  | 3433 | if (priv->temperature) | 
|  | 3434 | IWL_DEBUG_TEMP("Temperature changed " | 
|  | 3435 | "from %dC to %dC\n", | 
|  | 3436 | KELVIN_TO_CELSIUS(priv->temperature), | 
|  | 3437 | KELVIN_TO_CELSIUS(temp)); | 
|  | 3438 | else | 
|  | 3439 | IWL_DEBUG_TEMP("Temperature " | 
|  | 3440 | "initialized to %dC\n", | 
|  | 3441 | KELVIN_TO_CELSIUS(temp)); | 
|  | 3442 | } | 
|  | 3443 |  | 
|  | 3444 | priv->temperature = temp; | 
|  | 3445 | set_bit(STATUS_TEMPERATURE, &priv->status); | 
|  | 3446 |  | 
|  | 3447 | if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && | 
|  | 3448 | iwl4965_is_temp_calib_needed(priv)) | 
|  | 3449 | queue_work(priv->workqueue, &priv->txpower_work); | 
|  | 3450 | } | 
|  | 3451 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3452 | static void iwl4965_add_radiotap(struct iwl_priv *priv, | 
| Zhu Yi | 12342c4 | 2007-12-20 11:27:32 +0800 | [diff] [blame] | 3453 | struct sk_buff *skb, | 
|  | 3454 | struct iwl4965_rx_phy_res *rx_start, | 
|  | 3455 | struct ieee80211_rx_status *stats, | 
|  | 3456 | u32 ampdu_status) | 
|  | 3457 | { | 
|  | 3458 | s8 signal = stats->ssi; | 
|  | 3459 | s8 noise = 0; | 
| Johannes Berg | 8318d78 | 2008-01-24 19:38:38 +0100 | [diff] [blame] | 3460 | int rate = stats->rate_idx; | 
| Zhu Yi | 12342c4 | 2007-12-20 11:27:32 +0800 | [diff] [blame] | 3461 | u64 tsf = stats->mactime; | 
| Johannes Berg | a0b484f | 2008-04-01 17:51:47 +0200 | [diff] [blame] | 3462 | __le16 antenna; | 
| Zhu Yi | 12342c4 | 2007-12-20 11:27:32 +0800 | [diff] [blame] | 3463 | __le16 phy_flags_hw = rx_start->phy_flags; | 
|  | 3464 | struct iwl4965_rt_rx_hdr { | 
|  | 3465 | struct ieee80211_radiotap_header rt_hdr; | 
|  | 3466 | __le64 rt_tsf;		/* TSF */ | 
|  | 3467 | u8 rt_flags;		/* radiotap packet flags */ | 
|  | 3468 | u8 rt_rate;		/* rate in 500kb/s */ | 
|  | 3469 | __le16 rt_channelMHz;	/* channel in MHz */ | 
|  | 3470 | __le16 rt_chbitmask;	/* channel bitfield */ | 
|  | 3471 | s8 rt_dbmsignal;	/* signal in dBm, kluged to signed */ | 
|  | 3472 | s8 rt_dbmnoise; | 
|  | 3473 | u8 rt_antenna;		/* antenna number */ | 
|  | 3474 | } __attribute__ ((packed)) *iwl4965_rt; | 
|  | 3475 |  | 
|  | 3476 | /* TODO: We won't have enough headroom for HT frames. Fix it later. */ | 
|  | 3477 | if (skb_headroom(skb) < sizeof(*iwl4965_rt)) { | 
|  | 3478 | if (net_ratelimit()) | 
|  | 3479 | printk(KERN_ERR "not enough headroom [%d] for " | 
| Miguel Botón | 01c2098 | 2008-01-04 23:34:35 +0100 | [diff] [blame] | 3480 | "radiotap head [%zd]\n", | 
| Zhu Yi | 12342c4 | 2007-12-20 11:27:32 +0800 | [diff] [blame] | 3481 | skb_headroom(skb), sizeof(*iwl4965_rt)); | 
|  | 3482 | return; | 
|  | 3483 | } | 
|  | 3484 |  | 
|  | 3485 | /* put radiotap header in front of 802.11 header and data */ | 
|  | 3486 | iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt)); | 
|  | 3487 |  | 
|  | 3488 | /* initialise radiotap header */ | 
|  | 3489 | iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; | 
|  | 3490 | iwl4965_rt->rt_hdr.it_pad = 0; | 
|  | 3491 |  | 
|  | 3492 | /* total header + data */ | 
|  | 3493 | put_unaligned(cpu_to_le16(sizeof(*iwl4965_rt)), | 
|  | 3494 | &iwl4965_rt->rt_hdr.it_len); | 
|  | 3495 |  | 
|  | 3496 | /* Indicate all the fields we add to the radiotap header */ | 
|  | 3497 | put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) | | 
|  | 3498 | (1 << IEEE80211_RADIOTAP_FLAGS) | | 
|  | 3499 | (1 << IEEE80211_RADIOTAP_RATE) | | 
|  | 3500 | (1 << IEEE80211_RADIOTAP_CHANNEL) | | 
|  | 3501 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | | 
|  | 3502 | (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | | 
|  | 3503 | (1 << IEEE80211_RADIOTAP_ANTENNA)), | 
|  | 3504 | &iwl4965_rt->rt_hdr.it_present); | 
|  | 3505 |  | 
|  | 3506 | /* Zero the flags, we'll add to them as we go */ | 
|  | 3507 | iwl4965_rt->rt_flags = 0; | 
|  | 3508 |  | 
|  | 3509 | put_unaligned(cpu_to_le64(tsf), &iwl4965_rt->rt_tsf); | 
|  | 3510 |  | 
|  | 3511 | iwl4965_rt->rt_dbmsignal = signal; | 
|  | 3512 | iwl4965_rt->rt_dbmnoise = noise; | 
|  | 3513 |  | 
|  | 3514 | /* Convert the channel frequency and set the flags */ | 
|  | 3515 | put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz); | 
|  | 3516 | if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK)) | 
|  | 3517 | put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM | | 
|  | 3518 | IEEE80211_CHAN_5GHZ), | 
|  | 3519 | &iwl4965_rt->rt_chbitmask); | 
|  | 3520 | else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK) | 
|  | 3521 | put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK | | 
|  | 3522 | IEEE80211_CHAN_2GHZ), | 
|  | 3523 | &iwl4965_rt->rt_chbitmask); | 
|  | 3524 | else	/* 802.11g */ | 
|  | 3525 | put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM | | 
|  | 3526 | IEEE80211_CHAN_2GHZ), | 
|  | 3527 | &iwl4965_rt->rt_chbitmask); | 
|  | 3528 |  | 
| Zhu Yi | 12342c4 | 2007-12-20 11:27:32 +0800 | [diff] [blame] | 3529 | if (rate == -1) | 
|  | 3530 | iwl4965_rt->rt_rate = 0; | 
| Rick Farrington | ec04fd6 | 2008-07-01 09:20:33 +0800 | [diff] [blame] | 3531 | else { | 
|  | 3532 | if (stats->band == IEEE80211_BAND_5GHZ) | 
|  | 3533 | rate += IWL_FIRST_OFDM_RATE; | 
|  | 3534 |  | 
| Zhu Yi | 12342c4 | 2007-12-20 11:27:32 +0800 | [diff] [blame] | 3535 | iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee; | 
| Rick Farrington | ec04fd6 | 2008-07-01 09:20:33 +0800 | [diff] [blame] | 3536 | } | 
| Zhu Yi | 12342c4 | 2007-12-20 11:27:32 +0800 | [diff] [blame] | 3537 |  | 
|  | 3538 | /* | 
|  | 3539 | * "antenna number" | 
|  | 3540 | * | 
|  | 3541 | * It seems that the antenna field in the phy flags value | 
|  | 3542 | * is actually a bitfield. This is undefined by radiotap; | 
|  | 3543 | * it wants an actual antenna number, but I always get "7" | 
|  | 3544 | * for most legacy frames I receive, indicating that the | 
|  | 3545 | * same frame was received on all three RX chains. | 
|  | 3546 | * | 
|  | 3547 | * I think this field should be removed in favour of a | 
|  | 3548 | * new 802.11n radiotap field "RX chains" that is defined | 
|  | 3549 | * as a bitmask. | 
|  | 3550 | */ | 
| Johannes Berg | a0b484f | 2008-04-01 17:51:47 +0200 | [diff] [blame] | 3551 | antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK; | 
|  | 3552 | iwl4965_rt->rt_antenna = le16_to_cpu(antenna) >> 4; | 
| Zhu Yi | 12342c4 | 2007-12-20 11:27:32 +0800 | [diff] [blame] | 3553 |  | 
|  | 3554 | /* set the preamble flag if appropriate */ | 
|  | 3555 | if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) | 
|  | 3556 | iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; | 
|  | 3557 |  | 
|  | 3558 | stats->flag |= RX_FLAG_RADIOTAP; | 
|  | 3559 | } | 
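The it_present word filled in above is simply a bitmask of the radiotap field numbers that follow the header; using the standard radiotap bit indices (TSFT=0, FLAGS=1, RATE=2, CHANNEL=3, DBM_ANTSIGNAL=5, DBM_ANTNOISE=6, ANTENNA=11) the value can be computed by hand, as in this small sketch:

#include <stdio.h>

int main(void)
{
	unsigned int present = (1u << 0) | (1u << 1) | (1u << 2) | (1u << 3) |
			       (1u << 5) | (1u << 6) | (1u << 11);

	printf("it_present = 0x%08x\n", present);	/* 0x0000086f */
	return 0;
}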
|  | 3560 |  | 
| Tomas Winkler | 19758be | 2008-03-12 16:58:51 -0700 | [diff] [blame] | 3561 | static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len) | 
|  | 3562 | { | 
|  | 3563 | /* 0 - mgmt, 1 - ctl, 2 - data */ | 
|  | 3564 | int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2; | 
|  | 3565 | priv->rx_stats[idx].cnt++; | 
|  | 3566 | priv->rx_stats[idx].bytes += len; | 
|  | 3567 | } | 
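The index above falls straight out of the 802.11 frame-control layout: the FTYPE field occupies bits 2-3, so masking with IEEE80211_FCTL_FTYPE (0x000c) and shifting right by two maps management, control and data frames to 0, 1 and 2. A standalone check:

#include <stdio.h>

#define FCTL_FTYPE 0x000c
#define FTYPE_MGMT 0x0000
#define FTYPE_CTL  0x0004
#define FTYPE_DATA 0x0008

int main(void)
{
	unsigned short types[3] = { FTYPE_MGMT, FTYPE_CTL, FTYPE_DATA };
	int i;

	for (i = 0; i < 3; i++)
		printf("fc 0x%04x -> idx %d\n", (unsigned int)types[i],
		       (types[i] & FCTL_FTYPE) >> 2);	/* 0, 1, 2 */
	return 0;
}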
|  | 3568 |  | 
| Emmanuel Grumbach | 17e476b | 2008-03-19 16:41:42 -0700 | [diff] [blame] | 3569 | static u32 iwl4965_translate_rx_status(u32 decrypt_in) | 
|  | 3570 | { | 
|  | 3571 | u32 decrypt_out = 0; | 
|  | 3572 |  | 
|  | 3573 | if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == | 
|  | 3574 | RX_RES_STATUS_STATION_FOUND) | 
|  | 3575 | decrypt_out |= (RX_RES_STATUS_STATION_FOUND | | 
|  | 3576 | RX_RES_STATUS_NO_STATION_INFO_MISMATCH); | 
|  | 3577 |  | 
|  | 3578 | decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); | 
|  | 3579 |  | 
|  | 3580 | /* packet was not encrypted */ | 
|  | 3581 | if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == | 
|  | 3582 | RX_RES_STATUS_SEC_TYPE_NONE) | 
|  | 3583 | return decrypt_out; | 
|  | 3584 |  | 
|  | 3585 | /* packet was encrypted with unknown alg */ | 
|  | 3586 | if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == | 
|  | 3587 | RX_RES_STATUS_SEC_TYPE_ERR) | 
|  | 3588 | return decrypt_out; | 
|  | 3589 |  | 
|  | 3590 | /* decryption was not done in HW */ | 
|  | 3591 | if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != | 
|  | 3592 | RX_MPDU_RES_STATUS_DEC_DONE_MSK) | 
|  | 3593 | return decrypt_out; | 
|  | 3594 |  | 
|  | 3595 | switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { | 
|  | 3596 |  | 
|  | 3597 | case RX_RES_STATUS_SEC_TYPE_CCMP: | 
|  | 3598 | /* alg is CCM: check MIC only */ | 
|  | 3599 | if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) | 
|  | 3600 | /* Bad MIC */ | 
|  | 3601 | decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; | 
|  | 3602 | else | 
|  | 3603 | decrypt_out |= RX_RES_STATUS_DECRYPT_OK; | 
|  | 3604 |  | 
|  | 3605 | break; | 
|  | 3606 |  | 
|  | 3607 | case RX_RES_STATUS_SEC_TYPE_TKIP: | 
|  | 3608 | if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { | 
|  | 3609 | /* Bad TTAK */ | 
|  | 3610 | decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; | 
|  | 3611 | break; | 
|  | 3612 | } | 
|  | 3613 | /* fall through if TTAK OK */ | 
|  | 3614 | default: | 
|  | 3615 | if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) | 
|  | 3616 | decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; | 
|  | 3617 | else | 
|  | 3618 | decrypt_out |= RX_RES_STATUS_DECRYPT_OK; | 
|  | 3619 | break; | 
|  | 3620 | } | 
|  | 3621 |  | 
|  | 3622 | IWL_DEBUG_RX("decrypt_in:0x%x  decrypt_out = 0x%x\n", | 
|  | 3623 | decrypt_in, decrypt_out); | 
|  | 3624 |  | 
|  | 3625 | return decrypt_out; | 
|  | 3626 | } | 
|  | 3627 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3628 | static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3629 | int include_phy, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3630 | struct iwl4965_rx_mem_buffer *rxb, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3631 | struct ieee80211_rx_status *stats) | 
|  | 3632 | { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3633 | struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3634 | struct iwl4965_rx_phy_res *rx_start = (include_phy) ? | 
|  | 3635 | (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL; | 
|  | 3636 | struct ieee80211_hdr *hdr; | 
|  | 3637 | u16 len; | 
|  | 3638 | __le32 *rx_end; | 
|  | 3639 | unsigned int skblen; | 
|  | 3640 | u32 ampdu_status; | 
| Emmanuel Grumbach | 17e476b | 2008-03-19 16:41:42 -0700 | [diff] [blame] | 3641 | u32 ampdu_status_legacy; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3642 |  | 
|  | 3643 | if (!include_phy && priv->last_phy_res[0]) | 
|  | 3644 | rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1]; | 
|  | 3645 |  | 
|  | 3646 | if (!rx_start) { | 
|  | 3647 | IWL_ERROR("MPDU frame without a PHY data\n"); | 
|  | 3648 | return; | 
|  | 3649 | } | 
|  | 3650 | if (include_phy) { | 
|  | 3651 | hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] + | 
|  | 3652 | rx_start->cfg_phy_cnt); | 
|  | 3653 |  | 
|  | 3654 | len = le16_to_cpu(rx_start->byte_count); | 
|  | 3655 |  | 
|  | 3656 | rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] + | 
|  | 3657 | sizeof(struct iwl4965_rx_phy_res) + | 
|  | 3658 | rx_start->cfg_phy_cnt + len); | 
|  | 3659 |  | 
|  | 3660 | } else { | 
|  | 3661 | struct iwl4965_rx_mpdu_res_start *amsdu = | 
|  | 3662 | (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; | 
|  | 3663 |  | 
|  | 3664 | hdr = (struct ieee80211_hdr *)(pkt->u.raw + | 
|  | 3665 | sizeof(struct iwl4965_rx_mpdu_res_start)); | 
|  | 3666 | len =  le16_to_cpu(amsdu->byte_count); | 
|  | 3667 | rx_start->byte_count = amsdu->byte_count; | 
|  | 3668 | rx_end = (__le32 *) (((u8 *) hdr) + len); | 
|  | 3669 | } | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 3670 | if (len > priv->hw_params.max_pkt_size || len < 16) { | 
| Zhu Yi | 12342c4 | 2007-12-20 11:27:32 +0800 | [diff] [blame] | 3671 | IWL_WARNING("byte count out of range [16,4K] : %d\n", len); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3672 | return; | 
|  | 3673 | } | 
|  | 3674 |  | 
|  | 3675 | ampdu_status = le32_to_cpu(*rx_end); | 
|  | 3676 | skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32); | 
|  | 3677 |  | 
| Emmanuel Grumbach | 17e476b | 2008-03-19 16:41:42 -0700 | [diff] [blame] | 3678 | if (!include_phy) { | 
|  | 3679 | /* New status scheme, need to translate */ | 
|  | 3680 | ampdu_status_legacy = ampdu_status; | 
|  | 3681 | ampdu_status = iwl4965_translate_rx_status(ampdu_status); | 
|  | 3682 | } | 
|  | 3683 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3684 | /* start from MAC */ | 
|  | 3685 | skb_reserve(rxb->skb, (void *)hdr - (void *)pkt); | 
|  | 3686 | skb_put(rxb->skb, len);	/* end where data ends */ | 
|  | 3687 |  | 
|  | 3688 | /* We only process data packets if the interface is open */ | 
|  | 3689 | if (unlikely(!priv->is_open)) { | 
|  | 3690 | IWL_DEBUG_DROP_LIMIT | 
|  | 3691 | ("Dropping packet while interface is not open.\n"); | 
|  | 3692 | return; | 
|  | 3693 | } | 
|  | 3694 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3695 | stats->flag = 0; | 
|  | 3696 | hdr = (struct ieee80211_hdr *)rxb->skb->data; | 
|  | 3697 |  | 
| Emmanuel Grumbach | fcc76c6 | 2008-04-15 16:01:47 -0700 | [diff] [blame] | 3698 | if (!priv->cfg->mod_params->sw_crypto) | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3699 | iwl4965_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3700 |  | 
| Zhu Yi | 12342c4 | 2007-12-20 11:27:32 +0800 | [diff] [blame] | 3701 | if (priv->add_radiotap) | 
|  | 3702 | iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status); | 
|  | 3703 |  | 
| Tomas Winkler | 19758be | 2008-03-12 16:58:51 -0700 | [diff] [blame] | 3704 | iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3705 | ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); | 
|  | 3706 | priv->alloc_rxb_skb--; | 
|  | 3707 | rxb->skb = NULL; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3708 | } | 
|  | 3709 |  | 
|  | 3710 | /* Calc max signal level (dBm) among 3 possible receivers */ | 
|  | 3711 | static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp) | 
|  | 3712 | { | 
|  | 3713 | /* data from PHY/DSP regarding signal strength, etc., | 
|  | 3714 | *   contents are always there, not configurable by host.  */ | 
|  | 3715 | struct iwl4965_rx_non_cfg_phy *ncphy = | 
|  | 3716 | (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy; | 
|  | 3717 | u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK) | 
|  | 3718 | >> IWL_AGC_DB_POS; | 
|  | 3719 |  | 
|  | 3720 | u32 valid_antennae = | 
|  | 3721 | (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK) | 
|  | 3722 | >> RX_PHY_FLAGS_ANTENNAE_OFFSET; | 
|  | 3723 | u8 max_rssi = 0; | 
|  | 3724 | u32 i; | 
|  | 3725 |  | 
|  | 3726 | /* Find max rssi among 3 possible receivers. | 
|  | 3727 | * These values are measured by the digital signal processor (DSP). | 
|  | 3728 | * They should stay fairly constant even as the signal strength varies, | 
|  | 3729 | *   if the radio's automatic gain control (AGC) is working right. | 
|  | 3730 | * AGC value (see below) will provide the "interesting" info. */ | 
|  | 3731 | for (i = 0; i < 3; i++) | 
|  | 3732 | if (valid_antennae & (1 << i)) | 
|  | 3733 | max_rssi = max(ncphy->rssi_info[i << 1], max_rssi); | 
|  | 3734 |  | 
|  | 3735 | IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n", | 
|  | 3736 | ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4], | 
|  | 3737 | max_rssi, agc); | 
|  | 3738 |  | 
|  | 3739 | /* dBm = max_rssi dB - agc dB - constant. | 
|  | 3740 | * Higher AGC (higher radio gain) means lower signal. */ | 
|  | 3741 | return (max_rssi - agc - IWL_RSSI_OFFSET); | 
|  | 3742 | } | 
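A quick standalone walk-through of the conversion above: take the largest per-chain DSP RSSI, then subtract the AGC gain and the fixed offset. The offset of 44 used below is an assumed stand-in for IWL_RSSI_OFFSET and the readings are invented:

#include <stdio.h>

#define ASSUMED_RSSI_OFFSET 44	/* stand-in for IWL_RSSI_OFFSET */

int main(void)
{
	unsigned char rssi[3] = { 75, 80, 0 };	/* invented per-chain readings */
	unsigned char agc = 90;
	unsigned char max_rssi = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (rssi[i] > max_rssi)
			max_rssi = rssi[i];

	printf("signal = %d dBm\n", max_rssi - agc - ASSUMED_RSSI_OFFSET);
	/* 80 - 90 - 44 = -54 dBm */
	return 0;
}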
|  | 3743 |  | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 3744 | #ifdef CONFIG_IWL4965_HT | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3745 |  | 
| Assaf Krauss | 1ea8739 | 2008-03-18 14:57:50 -0700 | [diff] [blame] | 3746 | void iwl4965_init_ht_hw_capab(struct iwl_priv *priv, | 
|  | 3747 | struct ieee80211_ht_info *ht_info, | 
| Tomas Winkler | 78330fd | 2008-02-06 02:37:18 +0200 | [diff] [blame] | 3748 | enum ieee80211_band band) | 
| Ron Rindjunsky | 326eeee | 2007-11-26 16:14:37 +0200 | [diff] [blame] | 3749 | { | 
|  | 3750 | ht_info->cap = 0; | 
|  | 3751 | memset(ht_info->supp_mcs_set, 0, 16); | 
|  | 3752 |  | 
|  | 3753 | ht_info->ht_supported = 1; | 
|  | 3754 |  | 
| Tomas Winkler | 78330fd | 2008-02-06 02:37:18 +0200 | [diff] [blame] | 3755 | if (band == IEEE80211_BAND_5GHZ) { | 
| Ron Rindjunsky | 326eeee | 2007-11-26 16:14:37 +0200 | [diff] [blame] | 3756 | ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH; | 
|  | 3757 | ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40; | 
|  | 3758 | ht_info->supp_mcs_set[4] = 0x01; | 
|  | 3759 | } | 
|  | 3760 | ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD; | 
|  | 3761 | ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20; | 
|  | 3762 | ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS & | 
|  | 3763 | (IWL_MIMO_PS_NONE << 2)); | 
| Assaf Krauss | 1ea8739 | 2008-03-18 14:57:50 -0700 | [diff] [blame] | 3764 |  | 
|  | 3765 | if (priv->cfg->mod_params->amsdu_size_8K) | 
| Ron Rindjunsky | 9ee1ba4 | 2007-11-26 16:14:42 +0200 | [diff] [blame] | 3766 | ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU; | 
| Ron Rindjunsky | 326eeee | 2007-11-26 16:14:37 +0200 | [diff] [blame] | 3767 |  | 
|  | 3768 | ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; | 
|  | 3769 | ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; | 
|  | 3770 |  | 
|  | 3771 | ht_info->supp_mcs_set[0] = 0xFF; | 
|  | 3772 | ht_info->supp_mcs_set[1] = 0xFF; | 
|  | 3773 | } | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 3774 | #endif /* CONFIG_IWL4965_HT */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3775 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3776 | static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3777 | { | 
|  | 3778 | unsigned long flags; | 
|  | 3779 |  | 
|  | 3780 | spin_lock_irqsave(&priv->sta_lock, flags); | 
|  | 3781 | priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK; | 
|  | 3782 | priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; | 
|  | 3783 | priv->stations[sta_id].sta.sta.modify_mask = 0; | 
|  | 3784 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | 
|  | 3785 | spin_unlock_irqrestore(&priv->sta_lock, flags); | 
|  | 3786 |  | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3787 | iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3788 | } | 
|  | 3789 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3790 | static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3791 | { | 
|  | 3792 | /* FIXME: need locking over ps_status ??? */ | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3793 | u8 sta_id = iwl4965_hw_find_station(priv, addr); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3794 |  | 
|  | 3795 | if (sta_id != IWL_INVALID_STATION) { | 
|  | 3796 | u8 sta_awake = priv->stations[sta_id]. | 
|  | 3797 | ps_status == STA_PS_STATUS_WAKE; | 
|  | 3798 |  | 
|  | 3799 | if (sta_awake && ps_bit) | 
|  | 3800 | priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP; | 
|  | 3801 | else if (!sta_awake && !ps_bit) { | 
|  | 3802 | iwl4965_sta_modify_ps_wake(priv, sta_id); | 
|  | 3803 | priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE; | 
|  | 3804 | } | 
|  | 3805 | } | 
|  | 3806 | } | 
| Tomas Winkler | 0a6857e | 2008-03-12 16:58:49 -0700 | [diff] [blame] | 3807 | #ifdef CONFIG_IWLWIFI_DEBUG | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 3808 |  | 
|  | 3809 | /** | 
|  | 3810 | * iwl4965_dbg_report_frame - dump frame to syslog during debug sessions | 
|  | 3811 | * | 
|  | 3812 | * You may hack this function to show different aspects of received frames, | 
|  | 3813 | * including selective frame dumps. | 
|  | 3814 | * group100 parameter selects whether to show 1 out of 100 good frames. | 
|  | 3815 | * | 
|  | 3816 | * TODO:  This was originally written for 3945, need to audit for | 
|  | 3817 | *        proper operation with 4965. | 
|  | 3818 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3819 | static void iwl4965_dbg_report_frame(struct iwl_priv *priv, | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 3820 | struct iwl4965_rx_packet *pkt, | 
|  | 3821 | struct ieee80211_hdr *header, int group100) | 
|  | 3822 | { | 
|  | 3823 | u32 to_us; | 
|  | 3824 | u32 print_summary = 0; | 
|  | 3825 | u32 print_dump = 0;	/* set to 1 to dump all frames' contents */ | 
|  | 3826 | u32 hundred = 0; | 
|  | 3827 | u32 dataframe = 0; | 
|  | 3828 | u16 fc; | 
|  | 3829 | u16 seq_ctl; | 
|  | 3830 | u16 channel; | 
|  | 3831 | u16 phy_flags; | 
|  | 3832 | int rate_sym; | 
|  | 3833 | u16 length; | 
|  | 3834 | u16 status; | 
|  | 3835 | u16 bcn_tmr; | 
|  | 3836 | u32 tsf_low; | 
|  | 3837 | u64 tsf; | 
|  | 3838 | u8 rssi; | 
|  | 3839 | u8 agc; | 
|  | 3840 | u16 sig_avg; | 
|  | 3841 | u16 noise_diff; | 
|  | 3842 | struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); | 
|  | 3843 | struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); | 
|  | 3844 | struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt); | 
|  | 3845 | u8 *data = IWL_RX_DATA(pkt); | 
|  | 3846 |  | 
| Tomas Winkler | 0a6857e | 2008-03-12 16:58:49 -0700 | [diff] [blame] | 3847 | if (likely(!(iwl_debug_level & IWL_DL_RX))) | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 3848 | return; | 
|  | 3849 |  | 
|  | 3850 | /* MAC header */ | 
|  | 3851 | fc = le16_to_cpu(header->frame_control); | 
|  | 3852 | seq_ctl = le16_to_cpu(header->seq_ctrl); | 
|  | 3853 |  | 
|  | 3854 | /* metadata */ | 
|  | 3855 | channel = le16_to_cpu(rx_hdr->channel); | 
|  | 3856 | phy_flags = le16_to_cpu(rx_hdr->phy_flags); | 
|  | 3857 | rate_sym = rx_hdr->rate; | 
|  | 3858 | length = le16_to_cpu(rx_hdr->len); | 
|  | 3859 |  | 
|  | 3860 | /* end-of-frame status and timestamp */ | 
|  | 3861 | status = le32_to_cpu(rx_end->status); | 
|  | 3862 | bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp); | 
|  | 3863 | tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff; | 
|  | 3864 | tsf = le64_to_cpu(rx_end->timestamp); | 
|  | 3865 |  | 
|  | 3866 | /* signal statistics */ | 
|  | 3867 | rssi = rx_stats->rssi; | 
|  | 3868 | agc = rx_stats->agc; | 
|  | 3869 | sig_avg = le16_to_cpu(rx_stats->sig_avg); | 
|  | 3870 | noise_diff = le16_to_cpu(rx_stats->noise_diff); | 
|  | 3871 |  | 
|  | 3872 | to_us = !compare_ether_addr(header->addr1, priv->mac_addr); | 
|  | 3873 |  | 
|  | 3874 | /* if data frame is to us and all is good, | 
|  | 3875 | *   (optionally) print summary for only 1 out of every 100 */ | 
|  | 3876 | if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) == | 
|  | 3877 | (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { | 
|  | 3878 | dataframe = 1; | 
|  | 3879 | if (!group100) | 
|  | 3880 | print_summary = 1;	/* print each frame */ | 
|  | 3881 | else if (priv->framecnt_to_us < 100) { | 
|  | 3882 | priv->framecnt_to_us++; | 
|  | 3883 | print_summary = 0; | 
|  | 3884 | } else { | 
|  | 3885 | priv->framecnt_to_us = 0; | 
|  | 3886 | print_summary = 1; | 
|  | 3887 | hundred = 1; | 
|  | 3888 | } | 
|  | 3889 | } else { | 
|  | 3890 | /* print summary for all other frames */ | 
|  | 3891 | print_summary = 1; | 
|  | 3892 | } | 
|  | 3893 |  | 
|  | 3894 | if (print_summary) { | 
|  | 3895 | char *title; | 
|  | 3896 | int rate_idx; | 
|  | 3897 | u32 bitrate; | 
|  | 3898 |  | 
|  | 3899 | if (hundred) | 
|  | 3900 | title = "100Frames"; | 
|  | 3901 | else if (fc & IEEE80211_FCTL_RETRY) | 
|  | 3902 | title = "Retry"; | 
|  | 3903 | else if (ieee80211_is_assoc_response(fc)) | 
|  | 3904 | title = "AscRsp"; | 
|  | 3905 | else if (ieee80211_is_reassoc_response(fc)) | 
|  | 3906 | title = "RasRsp"; | 
|  | 3907 | else if (ieee80211_is_probe_response(fc)) { | 
|  | 3908 | title = "PrbRsp"; | 
|  | 3909 | print_dump = 1;	/* dump frame contents */ | 
|  | 3910 | } else if (ieee80211_is_beacon(fc)) { | 
|  | 3911 | title = "Beacon"; | 
|  | 3912 | print_dump = 1;	/* dump frame contents */ | 
|  | 3913 | } else if (ieee80211_is_atim(fc)) | 
|  | 3914 | title = "ATIM"; | 
|  | 3915 | else if (ieee80211_is_auth(fc)) | 
|  | 3916 | title = "Auth"; | 
|  | 3917 | else if (ieee80211_is_deauth(fc)) | 
|  | 3918 | title = "DeAuth"; | 
|  | 3919 | else if (ieee80211_is_disassoc(fc)) | 
|  | 3920 | title = "DisAssoc"; | 
|  | 3921 | else | 
|  | 3922 | title = "Frame"; | 
|  | 3923 |  | 
|  | 3924 | rate_idx = iwl4965_hwrate_to_plcp_idx(rate_sym); | 
|  | 3925 | if (unlikely(rate_idx == -1)) | 
|  | 3926 | bitrate = 0; | 
|  | 3927 | else | 
|  | 3928 | bitrate = iwl4965_rates[rate_idx].ieee / 2; | 
|  | 3929 |  | 
|  | 3930 | /* print frame summary. | 
|  | 3931 | * MAC addresses show just the last byte (for brevity), | 
|  | 3932 | *    but you can hack it to show more, if you'd like to. */ | 
|  | 3933 | if (dataframe) | 
|  | 3934 | IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, " | 
|  | 3935 | "len=%u, rssi=%d, chnl=%d, rate=%u, \n", | 
|  | 3936 | title, fc, header->addr1[5], | 
|  | 3937 | length, rssi, channel, bitrate); | 
|  | 3938 | else { | 
|  | 3939 | /* src/dst addresses assume managed mode */ | 
|  | 3940 | IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, " | 
|  | 3941 | "src=0x%02x, rssi=%u, tim=%lu usec, " | 
|  | 3942 | "phy=0x%02x, chnl=%d\n", | 
|  | 3943 | title, fc, header->addr1[5], | 
|  | 3944 | header->addr3[5], rssi, | 
|  | 3945 | tsf_low - priv->scan_start_tsf, | 
|  | 3946 | phy_flags, channel); | 
|  | 3947 | } | 
|  | 3948 | } | 
|  | 3949 | if (print_dump) | 
| Tomas Winkler | 0a6857e | 2008-03-12 16:58:49 -0700 | [diff] [blame] | 3950 | iwl_print_hex_dump(IWL_DL_RX, data, length); | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 3951 | } | 
|  | 3952 | #else | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3953 | static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv, | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 3954 | struct iwl4965_rx_packet *pkt, | 
|  | 3955 | struct ieee80211_hdr *header, | 
|  | 3956 | int group100) | 
|  | 3957 | { | 
|  | 3958 | } | 
|  | 3959 | #endif | 
|  | 3960 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3961 |  | 
| Mohamed Abbas | 7878a5a | 2007-11-29 11:10:13 +0800 | [diff] [blame] | 3962 |  | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 3963 | /* Called for REPLY_RX (legacy ABG frames), or | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3964 | * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 3965 | static void iwl4965_rx_reply_rx(struct iwl_priv *priv, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3966 | struct iwl4965_rx_mem_buffer *rxb) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3967 | { | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 3968 | struct ieee80211_hdr *header; | 
|  | 3969 | struct ieee80211_rx_status rx_status; | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 3970 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3971 | /* Use phy data (Rx signal strength, etc.) contained within | 
|  | 3972 | *   this rx packet for legacy frames, | 
|  | 3973 | *   or phy data cached from REPLY_RX_PHY_CMD for HT frames. */ | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 3974 | int include_phy = (pkt->hdr.cmd == REPLY_RX); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3975 | struct iwl4965_rx_phy_res *rx_start = (include_phy) ? | 
|  | 3976 | (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : | 
|  | 3977 | (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1]; | 
|  | 3978 | __le32 *rx_end; | 
|  | 3979 | unsigned int len = 0; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3980 | u16 fc; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3981 | u8 network_packet; | 
|  | 3982 |  | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 3983 | rx_status.mactime = le64_to_cpu(rx_start->timestamp); | 
| Tomas Winkler | dc92e49 | 2008-04-03 16:05:22 -0700 | [diff] [blame] | 3984 | rx_status.freq = | 
| Emmanuel Grumbach | c018607 | 2008-05-08 11:34:05 +0800 | [diff] [blame] | 3985 | ieee80211_channel_to_frequency(le16_to_cpu(rx_start->channel)); | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 3986 | rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? | 
|  | 3987 | IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; | 
| Tomas Winkler | dc92e49 | 2008-04-03 16:05:22 -0700 | [diff] [blame] | 3988 | rx_status.rate_idx = | 
|  | 3989 | iwl4965_hwrate_to_plcp_idx(le32_to_cpu(rx_start->rate_n_flags)); | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 3990 | if (rx_status.band == IEEE80211_BAND_5GHZ) | 
|  | 3991 | rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; | 
|  | 3992 |  | 
|  | 3993 | rx_status.antenna = 0; | 
|  | 3994 | rx_status.flag = 0; | 
|  | 3995 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3996 | if ((unlikely(rx_start->cfg_phy_cnt > 20))) { | 
| Tomas Winkler | dc92e49 | 2008-04-03 16:05:22 -0700 | [diff] [blame] | 3997 | IWL_DEBUG_DROP("dsp size out of range [0,20]: %d\n", | 
|  | 3998 | rx_start->cfg_phy_cnt); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 3999 | return; | 
|  | 4000 | } | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 4001 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4002 | if (!include_phy) { | 
|  | 4003 | if (priv->last_phy_res[0]) | 
|  | 4004 | rx_start = (struct iwl4965_rx_phy_res *) | 
|  | 4005 | &priv->last_phy_res[1]; | 
|  | 4006 | else | 
|  | 4007 | rx_start = NULL; | 
|  | 4008 | } | 
|  | 4009 |  | 
|  | 4010 | if (!rx_start) { | 
|  | 4011 | IWL_ERROR("MPDU frame without a PHY data\n"); | 
|  | 4012 | return; | 
|  | 4013 | } | 
|  | 4014 |  | 
|  | 4015 | if (include_phy) { | 
|  | 4016 | header = (struct ieee80211_hdr *)((u8 *) & rx_start[1] | 
|  | 4017 | + rx_start->cfg_phy_cnt); | 
|  | 4018 |  | 
|  | 4019 | len = le16_to_cpu(rx_start->byte_count); | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 4020 | rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt + | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4021 | sizeof(struct iwl4965_rx_phy_res) + len); | 
|  | 4022 | } else { | 
|  | 4023 | struct iwl4965_rx_mpdu_res_start *amsdu = | 
|  | 4024 | (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; | 
|  | 4025 |  | 
|  | 4026 | header = (void *)(pkt->u.raw + | 
|  | 4027 | sizeof(struct iwl4965_rx_mpdu_res_start)); | 
|  | 4028 | len = le16_to_cpu(amsdu->byte_count); | 
|  | 4029 | rx_end = (__le32 *) (pkt->u.raw + | 
|  | 4030 | sizeof(struct iwl4965_rx_mpdu_res_start) + len); | 
|  | 4031 | } | 
|  | 4032 |  | 
|  | 4033 | if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) || | 
|  | 4034 | !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) { | 
|  | 4035 | IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n", | 
|  | 4036 | le32_to_cpu(*rx_end)); | 
|  | 4037 | return; | 
|  | 4038 | } | 
|  | 4039 |  | 
|  | 4040 | priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp); | 
|  | 4041 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4042 | /* Find max signal strength (dBm) among 3 antenna/receiver chains */ | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 4043 | rx_status.ssi = iwl4965_calc_rssi(rx_start); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4044 |  | 
|  | 4045 | /* Meaningful noise values are available only from beacon statistics, | 
|  | 4046 | *   which are gathered only when associated, and indicate noise | 
|  | 4047 | *   only for the associated network channel ... | 
|  | 4048 | * Ignore these noise values while scanning (other channels) */ | 
| Tomas Winkler | 3109ece | 2008-03-28 16:33:35 -0700 | [diff] [blame] | 4049 | if (iwl_is_associated(priv) && | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4050 | !test_bit(STATUS_SCANNING, &priv->status)) { | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 4051 | rx_status.noise = priv->last_rx_noise; | 
|  | 4052 | rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, | 
|  | 4053 | rx_status.noise); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4054 | } else { | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 4055 | rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE; | 
|  | 4056 | rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 0); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4057 | } | 
|  | 4058 |  | 
|  | 4059 | /* Reset beacon noise level if not associated. */ | 
| Tomas Winkler | 3109ece | 2008-03-28 16:33:35 -0700 | [diff] [blame] | 4060 | if (!iwl_is_associated(priv)) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4061 | priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; | 
|  | 4062 |  | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 4063 | /* Set "1" to report good data frames in groups of 100 */ | 
|  | 4064 | /* FIXME: need to optimize the call: */ | 
|  | 4065 | iwl4965_dbg_report_frame(priv, pkt, header, 1); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4066 |  | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 4067 | IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n", | 
|  | 4068 | rx_status.ssi, rx_status.noise, rx_status.signal, | 
| John W. Linville | 06501d2 | 2008-04-01 17:38:47 -0400 | [diff] [blame] | 4069 | (unsigned long long)rx_status.mactime); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4070 |  | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4071 | network_packet = iwl4965_is_network_packet(priv, header); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4072 | if (network_packet) { | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 4073 | priv->last_rx_rssi = rx_status.ssi; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4074 | priv->last_beacon_time =  priv->ucode_beacon_time; | 
|  | 4075 | priv->last_tsf = le64_to_cpu(rx_start->timestamp); | 
|  | 4076 | } | 
|  | 4077 |  | 
|  | 4078 | fc = le16_to_cpu(header->frame_control); | 
|  | 4079 | switch (fc & IEEE80211_FCTL_FTYPE) { | 
|  | 4080 | case IEEE80211_FTYPE_MGMT: | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4081 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) | 
|  | 4082 | iwl4965_update_ps_mode(priv, fc  & IEEE80211_FCTL_PM, | 
|  | 4083 | header->addr2); | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 4084 | iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &rx_status); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4085 | break; | 
|  | 4086 |  | 
|  | 4087 | case IEEE80211_FTYPE_CTL: | 
| Ron Rindjunsky | 9ab4617 | 2007-12-25 17:00:38 +0200 | [diff] [blame] | 4088 | #ifdef CONFIG_IWL4965_HT | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4089 | switch (fc & IEEE80211_FCTL_STYPE) { | 
|  | 4090 | case IEEE80211_STYPE_BACK_REQ: | 
|  | 4091 | IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n"); | 
|  | 4092 | iwl4965_handle_data_packet(priv, 0, include_phy, | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 4093 | rxb, &rx_status); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4094 | break; | 
|  | 4095 | default: | 
|  | 4096 | break; | 
|  | 4097 | } | 
|  | 4098 | #endif | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4099 | break; | 
|  | 4100 |  | 
| Joe Perches | 0795af5 | 2007-10-03 17:59:30 -0700 | [diff] [blame] | 4101 | case IEEE80211_FTYPE_DATA: { | 
|  | 4102 | DECLARE_MAC_BUF(mac1); | 
|  | 4103 | DECLARE_MAC_BUF(mac2); | 
|  | 4104 | DECLARE_MAC_BUF(mac3); | 
|  | 4105 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4106 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) | 
|  | 4107 | iwl4965_update_ps_mode(priv, fc  & IEEE80211_FCTL_PM, | 
|  | 4108 | header->addr2); | 
|  | 4109 |  | 
|  | 4110 | if (unlikely(!network_packet)) | 
|  | 4111 | IWL_DEBUG_DROP("Dropping (non network): " | 
| Joe Perches | 0795af5 | 2007-10-03 17:59:30 -0700 | [diff] [blame] | 4112 | "%s, %s, %s\n", | 
|  | 4113 | print_mac(mac1, header->addr1), | 
|  | 4114 | print_mac(mac2, header->addr2), | 
|  | 4115 | print_mac(mac3, header->addr3)); | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4116 | else if (unlikely(iwl4965_is_duplicate_packet(priv, header))) | 
| Joe Perches | 0795af5 | 2007-10-03 17:59:30 -0700 | [diff] [blame] | 4117 | IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n", | 
|  | 4118 | print_mac(mac1, header->addr1), | 
|  | 4119 | print_mac(mac2, header->addr2), | 
|  | 4120 | print_mac(mac3, header->addr3)); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4121 | else | 
|  | 4122 | iwl4965_handle_data_packet(priv, 1, include_phy, rxb, | 
| Tomas Winkler | 17744ff | 2008-03-02 01:52:00 +0200 | [diff] [blame] | 4123 | &rx_status); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4124 | break; | 
| Joe Perches | 0795af5 | 2007-10-03 17:59:30 -0700 | [diff] [blame] | 4125 | } | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4126 | default: | 
|  | 4127 | break; | 
|  | 4128 |  | 
|  | 4129 | } | 
|  | 4130 | } | 
|  | 4131 |  | 
|  | 4132 | /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). | 
|  | 4133 | * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4134 | static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4135 | struct iwl4965_rx_mem_buffer *rxb) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4136 | { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4137 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4138 | priv->last_phy_res[0] = 1; | 
|  | 4139 | memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]), | 
|  | 4140 | sizeof(struct iwl4965_rx_phy_res)); | 
|  | 4141 | } | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4142 | static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4143 | struct iwl4965_rx_mem_buffer *rxb) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4144 |  | 
|  | 4145 | { | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 4146 | #ifdef CONFIG_IWL4965_SENSITIVITY | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4147 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 
|  | 4148 | struct iwl4965_missed_beacon_notif *missed_beacon; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4149 |  | 
|  | 4150 | missed_beacon = &pkt->u.missed_beacon; | 
|  | 4151 | if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) { | 
|  | 4152 | IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n", | 
|  | 4153 | le32_to_cpu(missed_beacon->consequtive_missed_beacons), | 
|  | 4154 | le32_to_cpu(missed_beacon->total_missed_becons), | 
|  | 4155 | le32_to_cpu(missed_beacon->num_recvd_beacons), | 
|  | 4156 | le32_to_cpu(missed_beacon->num_expected_beacons)); | 
|  | 4157 | priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT; | 
|  | 4158 | if (unlikely(!test_bit(STATUS_SCANNING, &priv->status))) | 
|  | 4159 | queue_work(priv->workqueue, &priv->sensitivity_work); | 
|  | 4160 | } | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 4161 | #endif /*CONFIG_IWL4965_SENSITIVITY*/ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4162 | } | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 4163 | #ifdef CONFIG_IWL4965_HT | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4164 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4165 | /** | 
|  | 4166 | * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table | 
|  | 4167 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4168 | static void iwl4965_sta_modify_enable_tid_tx(struct iwl_priv *priv, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4169 | int sta_id, int tid) | 
|  | 4170 | { | 
|  | 4171 | unsigned long flags; | 
|  | 4172 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4173 | /* Remove "disable" flag, to enable Tx for this TID */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4174 | spin_lock_irqsave(&priv->sta_lock, flags); | 
|  | 4175 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; | 
|  | 4176 | priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); | 
|  | 4177 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | 
|  | 4178 | spin_unlock_irqrestore(&priv->sta_lock, flags); | 
|  | 4179 |  | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4180 | iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4181 | } | 
|  | 4182 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4183 | /** | 
|  | 4184 | * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack | 
|  | 4185 | * | 
|  | 4186 | * Go through block-ack's bitmap of ACK'd frames, update driver's record of | 
|  | 4187 | * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo. | 
|  | 4188 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4189 | static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4190 | struct iwl4965_ht_agg *agg, | 
|  | 4191 | struct iwl4965_compressed_ba_resp *ba_resp) | 
|  | 4193 |  | 
|  | 4194 | { | 
|  | 4195 | int i, sh, ack; | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4196 | u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); | 
|  | 4197 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | 
|  | 4198 | u64 bitmap; | 
|  | 4199 | int successes = 0; | 
|  | 4200 | struct ieee80211_tx_status *tx_status; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4201 |  | 
|  | 4202 | if (unlikely(!agg->wait_for_ba))  { | 
|  | 4203 | IWL_ERROR("Received BA when not expected\n"); | 
|  | 4204 | return -EINVAL; | 
|  | 4205 | } | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4206 |  | 
|  | 4207 | /* Mark that the expected block-ack response arrived */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4208 | agg->wait_for_ba = 0; | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4209 | IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl); | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4210 |  | 
|  | 4211 | /* Calculate shift to align block-ack bits with our Tx window bits */ | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4212 | sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4); | 
| Ian Schram | 01ebd06 | 2007-10-25 17:15:22 +0800 | [diff] [blame] | 4213 | if (sh < 0) /* sequence space wrapped; realign indices */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4214 | sh += 0x100; | 
|  | 4215 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4216 | /* don't use 64-bit values for now */ | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4217 | bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4218 |  | 
|  | 4219 | if (agg->frame_count > (64 - sh)) { | 
|  | 4220 | IWL_DEBUG_TX_REPLY("more frames than bitmap size"); | 
|  | 4221 | return -1; | 
|  | 4222 | } | 
|  | 4223 |  | 
|  | 4224 | /* check for success or failure according to the | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4225 | * transmitted bitmap and block-ack bitmap */ | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4226 | bitmap &= agg->bitmap; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4227 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4228 | /* For each frame attempted in aggregation, | 
|  | 4229 | * update driver's record of tx frame's status. */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4230 | for (i = 0; i < agg->frame_count ; i++) { | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4231 | ack = bitmap & (1 << i); | 
|  | 4232 | successes += !!ack; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4233 | IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4234 | ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff, | 
|  | 4235 | agg->start_idx + i); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4236 | } | 
|  | 4237 |  | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4238 | tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status; | 
|  | 4239 | tx_status->flags = IEEE80211_TX_STATUS_ACK; | 
| Ron Rindjunsky | 9955643 | 2008-01-28 14:07:25 +0200 | [diff] [blame] | 4240 | tx_status->flags |= IEEE80211_TX_STATUS_AMPDU; | 
|  | 4241 | tx_status->ampdu_ack_map = successes; | 
|  | 4242 | tx_status->ampdu_ack_len = agg->frame_count; | 
| Ron Rindjunsky | 4c424e4 | 2008-03-04 18:09:27 -0800 | [diff] [blame] | 4243 | iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, | 
|  | 4244 | &tx_status->control); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4245 |  | 
| John W. Linville | f868f4e | 2008-03-07 16:38:43 -0500 | [diff] [blame] | 4246 | IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap); | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4247 |  | 
|  | 4248 | return 0; | 
|  | 4249 | } | 
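The shift-and-mask logic above is terse, so here is a standalone sketch (not driver code; the helper name and sample values are invented for illustration) of how the block-ack bitmap is realigned to the driver's Tx window and how per-frame ACKs are tallied. The ba_start_seq parameter plays the role of seq_ctl >> 4 in the driver.

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the alignment in iwl4965_tx_status_reply_compressed_ba();
 * indices live in an 8-bit (256-entry) space, hence the 0x100 wrap fixup. */
static int count_acked(uint64_t ba_bitmap, uint64_t tx_bitmap,
		       int start_idx, int ba_start_seq, int frame_count)
{
	int sh = start_idx - (ba_start_seq & 0xff);
	int i, acked = 0;

	if (sh < 0)			/* index space wrapped around */
		sh += 0x100;
	if (frame_count > 64 - sh)
		return -1;		/* more frames than bitmap bits */

	ba_bitmap = (ba_bitmap >> sh) & tx_bitmap;
	for (i = 0; i < frame_count; i++)
		acked += !!(ba_bitmap & (1ULL << i));
	return acked;
}

int main(void)
{
	/* five frames sent from index 0; frames 0, 1 and 3 were ACKed */
	printf("acked %d of 5\n", count_acked(0x0b, 0x1f, 0, 0, 5));
	return 0;
}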
|  | 4250 |  | 
|  | 4251 | /** | 
|  | 4252 | * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration | 
|  | 4253 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4254 | static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv, | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4255 | u16 txq_id) | 
|  | 4256 | { | 
|  | 4257 | /* Simply stop the queue, but don't change any configuration; | 
|  | 4258 | * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 4259 | iwl_write_prph(priv, | 
| Tomas Winkler | 12a81f6 | 2008-04-03 16:05:20 -0700 | [diff] [blame] | 4260 | IWL49_SCD_QUEUE_STATUS_BITS(txq_id), | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4261 | (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| | 
|  | 4262 | (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); | 
|  | 4263 | } | 
|  | 4264 |  | 
|  | 4265 | /** | 
|  | 4265 | * iwl4965_tx_queue_agg_disable - Deactivate a Tx queue used for aggregation | 
|  | 4266 | * | 
|  | 4266 | * txq_id must be at least IWL_BACK_QUEUE_FIRST_ID, | 
| Ron Rindjunsky | b095d03 | 2008-03-06 17:36:56 -0800 | [diff] [blame] | 4267 | * priv->lock must be held by the caller | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4268 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4269 | static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id, | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4270 | u16 ssn_idx, u8 tx_fifo) | 
|  | 4271 | { | 
| Ron Rindjunsky | b095d03 | 2008-03-06 17:36:56 -0800 | [diff] [blame] | 4272 | int ret = 0; | 
|  | 4273 |  | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4274 | if (IWL_BACK_QUEUE_FIRST_ID > txq_id) { | 
|  | 4275 | IWL_WARNING("queue number too small: %d, must be >= %d\n", | 
|  | 4276 | txq_id, IWL_BACK_QUEUE_FIRST_ID); | 
|  | 4277 | return -EINVAL; | 
|  | 4278 | } | 
|  | 4279 |  | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 4280 | ret = iwl_grab_nic_access(priv); | 
| Ron Rindjunsky | b095d03 | 2008-03-06 17:36:56 -0800 | [diff] [blame] | 4281 | if (ret) | 
|  | 4282 | return ret; | 
|  | 4283 |  | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4284 | iwl4965_tx_queue_stop_scheduler(priv, txq_id); | 
|  | 4285 |  | 
| Tomas Winkler | 12a81f6 | 2008-04-03 16:05:20 -0700 | [diff] [blame] | 4286 | iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4287 |  | 
|  | 4288 | priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); | 
|  | 4289 | priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); | 
|  | 4290 | /* supposes that ssn_idx is valid (!= 0xFFF) */ | 
|  | 4291 | iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); | 
|  | 4292 |  | 
| Tomas Winkler | 12a81f6 | 2008-04-03 16:05:20 -0700 | [diff] [blame] | 4293 | iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4294 | iwl4965_txq_ctx_deactivate(priv, txq_id); | 
|  | 4295 | iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); | 
|  | 4296 |  | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 4297 | iwl_release_nic_access(priv); | 
| Ron Rindjunsky | b095d03 | 2008-03-06 17:36:56 -0800 | [diff] [blame] | 4298 |  | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4299 | return 0; | 
|  | 4300 | } | 
|  | 4301 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4302 | int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id, | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4303 | u8 tid, int txq_id) | 
|  | 4304 | { | 
|  | 4305 | struct iwl4965_queue *q = &priv->txq[txq_id].q; | 
|  | 4306 | u8 *addr = priv->stations[sta_id].sta.sta.addr; | 
|  | 4307 | struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; | 
|  | 4308 |  | 
|  | 4309 | switch (priv->stations[sta_id].tid[tid].agg.state) { | 
|  | 4310 | case IWL_EMPTYING_HW_QUEUE_DELBA: | 
|  | 4311 | /* We are reclaiming the last packet of the | 
|  | 4312 | * aggregated HW queue */ | 
|  | 4313 | if (txq_id  == tid_data->agg.txq_id && | 
|  | 4314 | q->read_ptr == q->write_ptr) { | 
|  | 4315 | u16 ssn = SEQ_TO_SN(tid_data->seq_number); | 
|  | 4316 | int tx_fifo = default_tid_to_tx_fifo[tid]; | 
|  | 4317 | IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n"); | 
|  | 4318 | iwl4965_tx_queue_agg_disable(priv, txq_id, | 
|  | 4319 | ssn, tx_fifo); | 
|  | 4320 | tid_data->agg.state = IWL_AGG_OFF; | 
|  | 4321 | ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid); | 
|  | 4322 | } | 
|  | 4323 | break; | 
|  | 4324 | case IWL_EMPTYING_HW_QUEUE_ADDBA: | 
|  | 4325 | /* We are reclaiming the last packet of the queue */ | 
|  | 4326 | if (tid_data->tfds_in_queue == 0) { | 
|  | 4327 | IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n"); | 
|  | 4328 | tid_data->agg.state = IWL_AGG_ON; | 
|  | 4329 | ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid); | 
|  | 4330 | } | 
|  | 4331 | break; | 
|  | 4332 | } | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4333 | return 0; | 
|  | 4334 | } | 
|  | 4335 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4336 | /** | 
|  | 4337 | * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed | 
|  | 4338 | * @index -- current index | 
|  | 4339 | * @n_bd -- total number of entries in queue (s/b power of 2) | 
|  | 4340 | */ | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4341 | static inline int iwl4965_queue_dec_wrap(int index, int n_bd) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4342 | { | 
|  | 4343 | return (index == 0) ? n_bd - 1 : index - 1; | 
|  | 4344 | } | 
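A quick standalone illustration of the wrap-around above (the 256-entry queue size is only an example value):

#include <assert.h>

static int queue_dec_wrap(int index, int n_bd)
{
	return (index == 0) ? n_bd - 1 : index - 1;
}

int main(void)
{
	assert(queue_dec_wrap(5, 256) == 4);	/* ordinary decrement */
	assert(queue_dec_wrap(0, 256) == 255);	/* index 0 wraps to the last entry */
	return 0;
}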
|  | 4345 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4346 | /** | 
|  | 4347 | * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA | 
|  | 4348 | * | 
|  | 4349 | * Handles block-acknowledge notification from device, which reports success | 
|  | 4350 | * of frames sent via aggregation. | 
|  | 4351 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4352 | static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4353 | struct iwl4965_rx_mem_buffer *rxb) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4354 | { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4355 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 
|  | 4356 | struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4357 | int index; | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4358 | struct iwl4965_tx_queue *txq = NULL; | 
|  | 4359 | struct iwl4965_ht_agg *agg; | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4360 | DECLARE_MAC_BUF(mac); | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4361 |  | 
|  | 4362 | /* "flow" corresponds to Tx queue */ | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4363 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4364 |  | 
|  | 4365 | /* "ssn" is start of block-ack Tx window, corresponds to index | 
|  | 4366 | * (in Tx queue's circular buffer) of first TFD/frame in window */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4367 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); | 
|  | 4368 |  | 
| Ron Rindjunsky | dfe7d45 | 2008-04-15 16:01:45 -0700 | [diff] [blame] | 4369 | if (scd_flow >= priv->hw_params.max_txq_num) { | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4370 | IWL_ERROR("BUG_ON scd_flow is bigger than number of queues"); | 
|  | 4371 | return; | 
|  | 4372 | } | 
|  | 4373 |  | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4374 | txq = &priv->txq[scd_flow]; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4375 | agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg; | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4376 |  | 
|  | 4377 | /* Find index just before block-ack window */ | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4378 | index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4379 |  | 
| Ian Schram | 01ebd06 | 2007-10-25 17:15:22 +0800 | [diff] [blame] | 4380 | /* TODO: Need to get this copy more safely - now good for debug */ | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4381 |  | 
| Joe Perches | 0795af5 | 2007-10-03 17:59:30 -0700 | [diff] [blame] | 4382 | IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, " | 
|  | 4383 | "sta_id = %d\n", | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4384 | agg->wait_for_ba, | 
| Joe Perches | 0795af5 | 2007-10-03 17:59:30 -0700 | [diff] [blame] | 4385 | print_mac(mac, (u8*) &ba_resp->sta_addr_lo32), | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4386 | ba_resp->sta_id); | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4387 | IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = " | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4388 | "%d, scd_ssn = %d\n", | 
|  | 4389 | ba_resp->tid, | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4390 | ba_resp->seq_ctl, | 
| Tomas Winkler | 0310ae7 | 2008-03-11 16:17:19 -0700 | [diff] [blame] | 4391 | (unsigned long long)le64_to_cpu(ba_resp->bitmap), | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4392 | ba_resp->scd_flow, | 
|  | 4393 | ba_resp->scd_ssn); | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4394 | IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n", | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4395 | agg->start_idx, | 
| John W. Linville | f868f4e | 2008-03-07 16:38:43 -0500 | [diff] [blame] | 4396 | (unsigned long long)agg->bitmap); | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4397 |  | 
|  | 4398 | /* Update driver's record of ACK vs. not for each frame in window */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4399 | iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp); | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4400 |  | 
|  | 4401 | /* Release all TFDs before the SSN, i.e. all TFDs in front of | 
|  | 4402 | * block-ack window (we assume that they've been successfully | 
|  | 4403 | * transmitted ... if not, it's too late anyway). */ | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4404 | if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { | 
|  | 4405 | int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index); | 
|  | 4406 | priv->stations[ba_resp->sta_id]. | 
|  | 4407 | tid[ba_resp->tid].tfds_in_queue -= freed; | 
|  | 4408 | if (iwl4965_queue_space(&txq->q) > txq->q.low_mark && | 
|  | 4409 | priv->mac80211_registered && | 
|  | 4410 | agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) | 
|  | 4411 | ieee80211_wake_queue(priv->hw, scd_flow); | 
|  | 4412 | iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id, | 
|  | 4413 | ba_resp->tid, scd_flow); | 
|  | 4414 | } | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4415 | } | 
|  | 4416 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4417 | /** | 
|  | 4418 | * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue | 
|  | 4419 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4420 | static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4421 | u16 txq_id) | 
|  | 4422 | { | 
|  | 4423 | u32 tbl_dw_addr; | 
|  | 4424 | u32 tbl_dw; | 
|  | 4425 | u16 scd_q2ratid; | 
|  | 4426 |  | 
|  | 4427 | scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; | 
|  | 4428 |  | 
|  | 4429 | tbl_dw_addr = priv->scd_base_addr + | 
|  | 4430 | SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); | 
|  | 4431 |  | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 4432 | tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4433 |  | 
|  | 4434 | if (txq_id & 0x1) | 
|  | 4435 | tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); | 
|  | 4436 | else | 
|  | 4437 | tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); | 
|  | 4438 |  | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 4439 | iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4440 |  | 
|  | 4441 | return 0; | 
|  | 4442 | } | 
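For reference, a userspace sketch of the word packing performed above; the helper name and the sample RA/TID values are made up, but the even/odd split mirrors the code: two queues share one 32-bit scheduler table word, the even-numbered queue in the low half and the odd-numbered queue in the high half.

#include <stdint.h>
#include <stdio.h>

/* Model of the read-modify-write done on the scheduler's translate table. */
static uint32_t pack_q2ratid(uint32_t tbl_dw, unsigned int txq_id,
			     uint16_t ra_tid)
{
	if (txq_id & 0x1)
		return ((uint32_t)ra_tid << 16) | (tbl_dw & 0x0000FFFF);
	return ra_tid | (tbl_dw & 0xFFFF0000);
}

int main(void)
{
	uint32_t dw = 0;

	dw = pack_q2ratid(dw, 10, 0x1234);	/* even queue -> low 16 bits  */
	dw = pack_q2ratid(dw, 11, 0xabcd);	/* odd queue  -> high 16 bits */
	printf("0x%08x\n", (unsigned int)dw);	/* prints 0xabcd1234 */
	return 0;
}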
|  | 4443 |  | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4444 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4445 | /** | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4446 | * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue | 
|  | 4447 | * | 
|  | 4448 | * NOTE:  txq_id must be at least IWL_BACK_QUEUE_FIRST_ID, | 
|  | 4449 | *        i.e. it must be one of the higher queues used for aggregation | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4450 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4451 | static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4452 | int tx_fifo, int sta_id, int tid, | 
|  | 4453 | u16 ssn_idx) | 
|  | 4454 | { | 
|  | 4455 | unsigned long flags; | 
|  | 4456 | int rc; | 
|  | 4457 | u16 ra_tid; | 
|  | 4458 |  | 
|  | 4459 | if (IWL_BACK_QUEUE_FIRST_ID > txq_id) | 
|  | 4460 | IWL_WARNING("queue number too small: %d, must be >= %d\n", | 
|  | 4461 | txq_id, IWL_BACK_QUEUE_FIRST_ID); | 
|  | 4462 |  | 
|  | 4463 | ra_tid = BUILD_RAxTID(sta_id, tid); | 
|  | 4464 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4465 | /* Modify device's station table to Tx this TID */ | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4466 | iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4467 |  | 
|  | 4468 | spin_lock_irqsave(&priv->lock, flags); | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 4469 | rc = iwl_grab_nic_access(priv); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4470 | if (rc) { | 
|  | 4471 | spin_unlock_irqrestore(&priv->lock, flags); | 
|  | 4472 | return rc; | 
|  | 4473 | } | 
|  | 4474 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4475 | /* Stop this Tx queue before configuring it */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4476 | iwl4965_tx_queue_stop_scheduler(priv, txq_id); | 
|  | 4477 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4478 | /* Map receiver-address / traffic-ID to this queue */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4479 | iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id); | 
|  | 4480 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4481 | /* Set this queue as a chain-building queue */ | 
| Tomas Winkler | 12a81f6 | 2008-04-03 16:05:20 -0700 | [diff] [blame] | 4482 | iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4483 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4484 | /* Place first TFD at index corresponding to start sequence number. | 
|  | 4485 | * Assumes that ssn_idx is valid (!= 0xFFF) */ | 
| Tomas Winkler | fc4b685 | 2007-10-25 17:15:24 +0800 | [diff] [blame] | 4486 | priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); | 
|  | 4487 | priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4488 | iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); | 
|  | 4489 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4490 | /* Set up Tx window size and frame limit for this queue */ | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 4491 | iwl_write_targ_mem(priv, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4492 | priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id), | 
|  | 4493 | (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & | 
|  | 4494 | SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); | 
|  | 4495 |  | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 4496 | iwl_write_targ_mem(priv, priv->scd_base_addr + | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4497 | SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), | 
|  | 4498 | (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) | 
|  | 4499 | & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); | 
|  | 4500 |  | 
| Tomas Winkler | 12a81f6 | 2008-04-03 16:05:20 -0700 | [diff] [blame] | 4501 | iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4502 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4503 | /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4504 | iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); | 
|  | 4505 |  | 
| Tomas Winkler | 3395f6e | 2008-03-25 16:33:37 -0700 | [diff] [blame] | 4506 | iwl_release_nic_access(priv); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4507 | spin_unlock_irqrestore(&priv->lock, flags); | 
|  | 4508 |  | 
|  | 4509 | return 0; | 
|  | 4510 | } | 
|  | 4511 |  | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 4512 | #endif /* CONFIG_IWL4965_HT */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4513 |  | 
|  | 4514 | /** | 
|  | 4515 | * iwl4965_add_station - Initialize a station's hardware rate table | 
|  | 4516 | * | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4517 | * The uCode's station table contains a table of fallback rates | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4518 | * for automatic fallback during transmission. | 
|  | 4519 | * | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4520 | * NOTE: This sets up a default set of values.  These will be replaced later | 
|  | 4521 | *       if the driver's iwl-4965-rs rate scaling algorithm is used, instead of | 
|  | 4522 | *       rc80211_simple. | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4523 | * | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4524 | * NOTE: Run REPLY_ADD_STA command to set up station table entry, before | 
|  | 4525 | *       calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, | 
|  | 4526 | *       which requires station table entry to exist). | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4527 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4528 | void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4529 | { | 
|  | 4530 | int i, r; | 
| Tomas Winkler | 66c73db | 2008-04-15 16:01:40 -0700 | [diff] [blame] | 4531 | struct iwl_link_quality_cmd link_cmd = { | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4532 | .reserved1 = 0, | 
|  | 4533 | }; | 
|  | 4534 | u16 rate_flags; | 
|  | 4535 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4536 | /* Set up the rate scaling to start at selected rate, fall back | 
|  | 4537 | * all the way down to 1M in IEEE order, and then spin on 1M */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4538 | if (is_ap) | 
|  | 4539 | r = IWL_RATE_54M_INDEX; | 
| Johannes Berg | 8318d78 | 2008-01-24 19:38:38 +0100 | [diff] [blame] | 4540 | else if (priv->band == IEEE80211_BAND_5GHZ) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4541 | r = IWL_RATE_6M_INDEX; | 
|  | 4542 | else | 
|  | 4543 | r = IWL_RATE_1M_INDEX; | 
|  | 4544 |  | 
|  | 4545 | for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { | 
|  | 4546 | rate_flags = 0; | 
|  | 4547 | if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) | 
|  | 4548 | rate_flags |= RATE_MCS_CCK_MSK; | 
|  | 4549 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4550 | /* Use Tx antenna B only */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4551 | rate_flags |= RATE_MCS_ANT_B_MSK; | 
|  | 4552 | rate_flags &= ~RATE_MCS_ANT_A_MSK; | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4553 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4554 | link_cmd.rs_table[i].rate_n_flags = | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4555 | iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags); | 
|  | 4556 | r = iwl4965_get_prev_ieee_rate(r); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4557 | } | 
|  | 4558 |  | 
|  | 4559 | link_cmd.general_params.single_stream_ant_msk = 2; | 
|  | 4560 | link_cmd.general_params.dual_stream_ant_msk = 3; | 
|  | 4561 | link_cmd.agg_params.agg_dis_start_th = 3; | 
|  | 4562 | link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000); | 
|  | 4563 |  | 
|  | 4564 | /* Update the rate scaling for control frame Tx to AP */ | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 4565 | link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4566 |  | 
| Tomas Winkler | e547297 | 2008-03-28 16:21:12 -0700 | [diff] [blame] | 4567 | iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD, | 
|  | 4568 | sizeof(link_cmd), &link_cmd, NULL); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4569 | } | 
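A standalone sketch of the fallback pattern the comment above describes: start at the chosen rate, step down one rate per retry slot, then stay on the lowest rate. The rate list and the retry count of 16 are illustrative stand-ins for iwl4965_rates[] / iwl4965_get_prev_ieee_rate() and LINK_QUAL_MAX_RETRY_NUM.

#include <stdio.h>

int main(void)
{
	/* Illustrative rate list in increasing-speed order. */
	static const char *const rates[] = {
		"1M", "2M", "5.5M", "6M", "9M", "11M", "12M", "18M",
		"24M", "36M", "48M", "54M"
	};
	int r = 11;			/* start at 54M, as for an AP */
	int i;

	for (i = 0; i < 16; i++) {	/* one entry per retry slot */
		printf("retry slot %2d -> %s\n", i, rates[r]);
		if (r > 0)		/* fall back one step, then spin on 1M */
			r--;
	}
	return 0;
}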
|  | 4570 |  | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 4571 | #ifdef CONFIG_IWL4965_HT | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4572 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4573 | static u8 iwl4965_is_channel_extension(struct iwl_priv *priv, | 
| Johannes Berg | 8318d78 | 2008-01-24 19:38:38 +0100 | [diff] [blame] | 4574 | enum ieee80211_band band, | 
| Tomas Winkler | 78330fd | 2008-02-06 02:37:18 +0200 | [diff] [blame] | 4575 | u16 channel, u8 extension_chan_offset) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4576 | { | 
| Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 4577 | const struct iwl_channel_info *ch_info; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4578 |  | 
| Assaf Krauss | 8622e70 | 2008-03-21 13:53:43 -0700 | [diff] [blame] | 4579 | ch_info = iwl_get_channel_info(priv, band, channel); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4580 | if (!is_channel_valid(ch_info)) | 
|  | 4581 | return 0; | 
|  | 4582 |  | 
| Guy Cohen | 134eb5d | 2008-03-04 18:09:25 -0800 | [diff] [blame] | 4583 | if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4584 | return 0; | 
|  | 4585 |  | 
|  | 4586 | if ((ch_info->fat_extension_channel == extension_chan_offset) || | 
|  | 4587 | (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX)) | 
|  | 4588 | return 1; | 
|  | 4589 |  | 
|  | 4590 | return 0; | 
|  | 4591 | } | 
|  | 4592 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4593 | static u8 iwl4965_is_fat_tx_allowed(struct iwl_priv *priv, | 
| Ron Rindjunsky | fd105e7 | 2007-11-26 16:14:39 +0200 | [diff] [blame] | 4594 | struct ieee80211_ht_info *sta_ht_inf) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4595 | { | 
| Ron Rindjunsky | fd105e7 | 2007-11-26 16:14:39 +0200 | [diff] [blame] | 4596 | struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4597 |  | 
| Ron Rindjunsky | fd105e7 | 2007-11-26 16:14:39 +0200 | [diff] [blame] | 4598 | if ((!iwl_ht_conf->is_ht) || | 
|  | 4599 | (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) || | 
| Guy Cohen | 134eb5d | 2008-03-04 18:09:25 -0800 | [diff] [blame] | 4600 | (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4601 | return 0; | 
|  | 4602 |  | 
| Ron Rindjunsky | fd105e7 | 2007-11-26 16:14:39 +0200 | [diff] [blame] | 4603 | if (sta_ht_inf) { | 
|  | 4604 | if ((!sta_ht_inf->ht_supported) || | 
| Roel Kluin | 194c7ca | 2008-02-02 20:48:48 +0100 | [diff] [blame] | 4605 | (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH))) | 
| Ron Rindjunsky | fd105e7 | 2007-11-26 16:14:39 +0200 | [diff] [blame] | 4606 | return 0; | 
|  | 4607 | } | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4608 |  | 
| Tomas Winkler | 78330fd | 2008-02-06 02:37:18 +0200 | [diff] [blame] | 4609 | return (iwl4965_is_channel_extension(priv, priv->band, | 
| Ron Rindjunsky | fd105e7 | 2007-11-26 16:14:39 +0200 | [diff] [blame] | 4610 | iwl_ht_conf->control_channel, | 
|  | 4611 | iwl_ht_conf->extension_chan_offset)); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4612 | } | 
|  | 4613 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4614 | void iwl4965_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4615 | { | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4616 | struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4617 | u32 val; | 
|  | 4618 |  | 
|  | 4619 | if (!ht_info->is_ht) | 
|  | 4620 | return; | 
|  | 4621 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4622 | /* Set up channel bandwidth:  20 MHz only, or 20/40 mixed if fat ok */ | 
| Ron Rindjunsky | fd105e7 | 2007-11-26 16:14:39 +0200 | [diff] [blame] | 4623 | if (iwl4965_is_fat_tx_allowed(priv, NULL)) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4624 | rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK; | 
|  | 4625 | else | 
|  | 4626 | rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK | | 
|  | 4627 | RXON_FLG_CHANNEL_MODE_PURE_40_MSK); | 
|  | 4628 |  | 
|  | 4629 | if (le16_to_cpu(rxon->channel) != ht_info->control_channel) { | 
|  | 4630 | IWL_DEBUG_ASSOC("control diff than current %d %d\n", | 
|  | 4631 | le16_to_cpu(rxon->channel), | 
|  | 4632 | ht_info->control_channel); | 
|  | 4633 | rxon->channel = cpu_to_le16(ht_info->control_channel); | 
|  | 4634 | return; | 
|  | 4635 | } | 
|  | 4636 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4637 | /* Note: control channel is opposite of extension channel */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4638 | switch (ht_info->extension_chan_offset) { | 
|  | 4639 | case IWL_EXT_CHANNEL_OFFSET_ABOVE: | 
|  | 4640 | rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); | 
|  | 4641 | break; | 
|  | 4642 | case IWL_EXT_CHANNEL_OFFSET_BELOW: | 
|  | 4643 | rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; | 
|  | 4644 | break; | 
| Guy Cohen | 134eb5d | 2008-03-04 18:09:25 -0800 | [diff] [blame] | 4645 | case IWL_EXT_CHANNEL_OFFSET_NONE: | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4646 | default: | 
|  | 4647 | rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK; | 
|  | 4648 | break; | 
|  | 4649 | } | 
|  | 4650 |  | 
| Ron Rindjunsky | fd105e7 | 2007-11-26 16:14:39 +0200 | [diff] [blame] | 4651 | val = ht_info->ht_protection; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4652 |  | 
|  | 4653 | rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS); | 
|  | 4654 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4655 | iwl4965_set_rxon_chain(priv); | 
|  | 4656 |  | 
|  | 4657 | IWL_DEBUG_ASSOC("supported HT rate 0x%X %X " | 
|  | 4658 | "rxon flags 0x%X operation mode :0x%X " | 
|  | 4659 | "extension channel offset 0x%x " | 
|  | 4660 | "control chan %d\n", | 
| Ron Rindjunsky | fd105e7 | 2007-11-26 16:14:39 +0200 | [diff] [blame] | 4661 | ht_info->supp_mcs_set[0], ht_info->supp_mcs_set[1], | 
|  | 4662 | le32_to_cpu(rxon->flags), ht_info->ht_protection, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4663 | ht_info->extension_chan_offset, | 
|  | 4664 | ht_info->control_channel); | 
|  | 4665 | return; | 
|  | 4666 | } | 
|  | 4667 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4668 | void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index, | 
| Ron Rindjunsky | 67d6203 | 2007-11-26 16:14:40 +0200 | [diff] [blame] | 4669 | struct ieee80211_ht_info *sta_ht_inf) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4670 | { | 
|  | 4671 | __le32 sta_flags; | 
| Tomas Winkler | e53cfe0 | 2008-01-30 22:05:13 -0800 | [diff] [blame] | 4672 | u8 mimo_ps_mode; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4673 |  | 
| Ron Rindjunsky | 67d6203 | 2007-11-26 16:14:40 +0200 | [diff] [blame] | 4674 | if (!sta_ht_inf || !sta_ht_inf->ht_supported) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4675 | goto done; | 
|  | 4676 |  | 
| Tomas Winkler | e53cfe0 | 2008-01-30 22:05:13 -0800 | [diff] [blame] | 4677 | mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2; | 
|  | 4678 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4679 | sta_flags = priv->stations[index].sta.station_flags; | 
|  | 4680 |  | 
| Tomas Winkler | e53cfe0 | 2008-01-30 22:05:13 -0800 | [diff] [blame] | 4681 | sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); | 
|  | 4682 |  | 
|  | 4683 | switch (mimo_ps_mode) { | 
|  | 4684 | case WLAN_HT_CAP_MIMO_PS_STATIC: | 
|  | 4685 | sta_flags |= STA_FLG_MIMO_DIS_MSK; | 
|  | 4686 | break; | 
|  | 4687 | case WLAN_HT_CAP_MIMO_PS_DYNAMIC: | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4688 | sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; | 
| Tomas Winkler | e53cfe0 | 2008-01-30 22:05:13 -0800 | [diff] [blame] | 4689 | break; | 
|  | 4690 | case WLAN_HT_CAP_MIMO_PS_DISABLED: | 
|  | 4691 | break; | 
|  | 4692 | default: | 
|  | 4693 | IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode); | 
|  | 4694 | break; | 
|  | 4695 | } | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4696 |  | 
|  | 4697 | sta_flags |= cpu_to_le32( | 
| Ron Rindjunsky | 67d6203 | 2007-11-26 16:14:40 +0200 | [diff] [blame] | 4698 | (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4699 |  | 
|  | 4700 | sta_flags |= cpu_to_le32( | 
| Ron Rindjunsky | 67d6203 | 2007-11-26 16:14:40 +0200 | [diff] [blame] | 4701 | (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4702 |  | 
| Ron Rindjunsky | 67d6203 | 2007-11-26 16:14:40 +0200 | [diff] [blame] | 4703 | if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf)) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4704 | sta_flags |= STA_FLG_FAT_EN_MSK; | 
| Ron Rindjunsky | 67d6203 | 2007-11-26 16:14:40 +0200 | [diff] [blame] | 4705 | else | 
| Tomas Winkler | e53cfe0 | 2008-01-30 22:05:13 -0800 | [diff] [blame] | 4706 | sta_flags &= ~STA_FLG_FAT_EN_MSK; | 
| Ron Rindjunsky | 67d6203 | 2007-11-26 16:14:40 +0200 | [diff] [blame] | 4707 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4708 | priv->stations[index].sta.station_flags = sta_flags; | 
|  | 4709 | done: | 
|  | 4710 | return; | 
|  | 4711 | } | 
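The SM/MIMO power-save decode above can be illustrated with a small standalone program. The mask and mode values below restate the 802.11n bit layout (bits 2-3 of the HT capability field) purely for the example; they are assumptions for illustration, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define EX_HT_CAP_MIMO_PS_MASK	0x000C	/* bits 2-3 of HT cap info */
#define EX_MIMO_PS_STATIC	0
#define EX_MIMO_PS_DYNAMIC	1
#define EX_MIMO_PS_DISABLED	3

int main(void)
{
	uint16_t ht_cap = 0x0004;	/* example: dynamic SM power save */
	uint8_t mode = (ht_cap & EX_HT_CAP_MIMO_PS_MASK) >> 2;

	switch (mode) {
	case EX_MIMO_PS_STATIC:
		printf("static: disable MIMO for this station\n");
		break;
	case EX_MIMO_PS_DYNAMIC:
		printf("dynamic: protect MIMO frames with RTS\n");
		break;
	case EX_MIMO_PS_DISABLED:
		printf("power save disabled: full MIMO allowed\n");
		break;
	default:
		printf("reserved value\n");
		break;
	}
	return 0;
}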
|  | 4712 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4713 | static void iwl4965_sta_modify_add_ba_tid(struct iwl_priv *priv, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4714 | int sta_id, int tid, u16 ssn) | 
|  | 4715 | { | 
|  | 4716 | unsigned long flags; | 
|  | 4717 |  | 
|  | 4718 | spin_lock_irqsave(&priv->sta_lock, flags); | 
|  | 4719 | priv->stations[sta_id].sta.station_flags_msk = 0; | 
|  | 4720 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; | 
|  | 4721 | priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; | 
|  | 4722 | priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); | 
|  | 4723 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | 
|  | 4724 | spin_unlock_irqrestore(&priv->sta_lock, flags); | 
|  | 4725 |  | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4726 | iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4727 | } | 
|  | 4728 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4729 | static void iwl4965_sta_modify_del_ba_tid(struct iwl_priv *priv, | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4730 | int sta_id, int tid) | 
|  | 4731 | { | 
|  | 4732 | unsigned long flags; | 
|  | 4733 |  | 
|  | 4734 | spin_lock_irqsave(&priv->sta_lock, flags); | 
|  | 4735 | priv->stations[sta_id].sta.station_flags_msk = 0; | 
|  | 4736 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; | 
|  | 4737 | priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid; | 
|  | 4738 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | 
|  | 4739 | spin_unlock_irqrestore(&priv->sta_lock, flags); | 
|  | 4740 |  | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4741 | iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4742 | } | 
|  | 4743 |  | 
| Cahill, Ben M | 8b6eaea | 2007-11-29 11:09:54 +0800 | [diff] [blame] | 4744 | /* | 
|  | 4745 | * Find first available (lowest unused) Tx Queue, mark it "active". | 
|  | 4746 | * Called only when finding queue for aggregation. | 
|  | 4747 | * Should never return anything < 7, because they should already | 
|  | 4748 | * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6). | 
|  | 4749 | */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4750 | static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4751 | { | 
|  | 4752 | int txq_id; | 
|  | 4753 |  | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 4754 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4755 | if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) | 
|  | 4756 | return txq_id; | 
|  | 4757 | return -1; | 
|  | 4758 | } | 
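A standalone model of the find-lowest-free-queue loop above, using a plain bitmask and a fixed queue count in place of txq_ctx_active_msk and priv->hw_params.max_txq_num:

#include <stdio.h>

static int activate_free_queue(unsigned long *mask, int max_txq)
{
	int txq_id;

	for (txq_id = 0; txq_id < max_txq; txq_id++)
		if (!(*mask & (1UL << txq_id))) {
			*mask |= 1UL << txq_id;	/* mark it active */
			return txq_id;
		}
	return -1;				/* all queues in use */
}

int main(void)
{
	unsigned long mask = 0x7f;	/* queues 0-6 already active (EDCA, cmd, HCCA) */

	printf("%d\n", activate_free_queue(&mask, 16));	/* prints 7 */
	printf("%d\n", activate_free_queue(&mask, 16));	/* prints 8 */
	return 0;
}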
|  | 4759 |  | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4760 | static int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da, | 
|  | 4761 | u16 tid, u16 *start_seq_num) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4762 | { | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4763 | struct iwl_priv *priv = hw->priv; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4764 | int sta_id; | 
|  | 4765 | int tx_fifo; | 
|  | 4766 | int txq_id; | 
|  | 4767 | int ssn = -1; | 
| Ron Rindjunsky | b095d03 | 2008-03-06 17:36:56 -0800 | [diff] [blame] | 4768 | int ret = 0; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4769 | unsigned long flags; | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4770 | struct iwl4965_tid_data *tid_data; | 
| Joe Perches | 0795af5 | 2007-10-03 17:59:30 -0700 | [diff] [blame] | 4771 | DECLARE_MAC_BUF(mac); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4772 |  | 
|  | 4773 | if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) | 
|  | 4774 | tx_fifo = default_tid_to_tx_fifo[tid]; | 
|  | 4775 | else | 
|  | 4776 | return -EINVAL; | 
|  | 4777 |  | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4778 | IWL_WARNING("%s on da = %s tid = %d\n", | 
|  | 4779 | __func__, print_mac(mac, da), tid); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4780 |  | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4781 | sta_id = iwl4965_hw_find_station(priv, da); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4782 | if (sta_id == IWL_INVALID_STATION) | 
|  | 4783 | return -ENXIO; | 
|  | 4784 |  | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4785 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { | 
|  | 4786 | IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n"); | 
|  | 4787 | return -ENXIO; | 
|  | 4788 | } | 
|  | 4789 |  | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4790 | txq_id = iwl4965_txq_ctx_activate_free(priv); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4791 | if (txq_id == -1) | 
|  | 4792 | return -ENXIO; | 
|  | 4793 |  | 
|  | 4794 | spin_lock_irqsave(&priv->sta_lock, flags); | 
|  | 4795 | tid_data = &priv->stations[sta_id].tid[tid]; | 
|  | 4796 | ssn = SEQ_TO_SN(tid_data->seq_number); | 
|  | 4797 | tid_data->agg.txq_id = txq_id; | 
|  | 4798 | spin_unlock_irqrestore(&priv->sta_lock, flags); | 
|  | 4799 |  | 
|  | 4800 | *start_seq_num = ssn; | 
| Ron Rindjunsky | b095d03 | 2008-03-06 17:36:56 -0800 | [diff] [blame] | 4801 | ret = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo, | 
|  | 4802 | sta_id, tid, ssn); | 
|  | 4803 | if (ret) | 
|  | 4804 | return ret; | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4805 |  | 
| Ron Rindjunsky | b095d03 | 2008-03-06 17:36:56 -0800 | [diff] [blame] | 4806 | ret = 0; | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4807 | if (tid_data->tfds_in_queue == 0) { | 
|  | 4808 | IWL_DEBUG_HT("HW queue is empty\n"); | 
|  | 4809 | tid_data->agg.state = IWL_AGG_ON; | 
|  | 4810 | ieee80211_start_tx_ba_cb_irqsafe(hw, da, tid); | 
|  | 4811 | } else { | 
|  | 4812 | IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n", | 
|  | 4813 | tid_data->tfds_in_queue); | 
|  | 4814 | tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; | 
|  | 4815 | } | 
| Ron Rindjunsky | b095d03 | 2008-03-06 17:36:56 -0800 | [diff] [blame] | 4816 | return ret; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4817 | } | 
|  | 4818 |  | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4819 | static int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da, | 
|  | 4820 | u16 tid) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4821 | { | 
|  | 4822 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4823 | struct iwl_priv *priv = hw->priv; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4824 | int tx_fifo_id, txq_id, sta_id, ssn = -1; | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4825 | struct iwl4965_tid_data *tid_data; | 
| Ron Rindjunsky | b095d03 | 2008-03-06 17:36:56 -0800 | [diff] [blame] | 4826 | int ret, write_ptr, read_ptr; | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4827 | unsigned long flags; | 
| Joe Perches | 0795af5 | 2007-10-03 17:59:30 -0700 | [diff] [blame] | 4828 | DECLARE_MAC_BUF(mac); | 
|  | 4829 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4830 | if (!da) { | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4831 | IWL_ERROR("da = NULL\n"); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4832 | return -EINVAL; | 
|  | 4833 | } | 
|  | 4834 |  | 
|  | 4835 | if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) | 
|  | 4836 | tx_fifo_id = default_tid_to_tx_fifo[tid]; | 
|  | 4837 | else | 
|  | 4838 | return -EINVAL; | 
|  | 4839 |  | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4840 | sta_id = iwl4965_hw_find_station(priv, da); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4841 |  | 
|  | 4842 | if (sta_id == IWL_INVALID_STATION) | 
|  | 4843 | return -ENXIO; | 
|  | 4844 |  | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4845 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) | 
|  | 4846 | IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n"); | 
|  | 4847 |  | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4848 | tid_data = &priv->stations[sta_id].tid[tid]; | 
|  | 4849 | ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; | 
|  | 4850 | txq_id = tid_data->agg.txq_id; | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4851 | write_ptr = priv->txq[txq_id].q.write_ptr; | 
|  | 4852 | read_ptr = priv->txq[txq_id].q.read_ptr; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4853 |  | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4854 | /* The queue is not empty */ | 
|  | 4855 | if (write_ptr != read_ptr) { | 
|  | 4856 | IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n"); | 
|  | 4857 | priv->stations[sta_id].tid[tid].agg.state = | 
|  | 4858 | IWL_EMPTYING_HW_QUEUE_DELBA; | 
|  | 4859 | return 0; | 
|  | 4860 | } | 
|  | 4861 |  | 
|  | 4862 | IWL_DEBUG_HT("HW queue empty\n"); | 
|  | 4863 | priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; | 
|  | 4864 |  | 
|  | 4865 | spin_lock_irqsave(&priv->lock, flags); | 
| Ron Rindjunsky | b095d03 | 2008-03-06 17:36:56 -0800 | [diff] [blame] | 4866 | ret = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id); | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4867 | spin_unlock_irqrestore(&priv->lock, flags); | 
|  | 4868 |  | 
| Ron Rindjunsky | b095d03 | 2008-03-06 17:36:56 -0800 | [diff] [blame] | 4869 | if (ret) | 
|  | 4870 | return ret; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4871 |  | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4872 | ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, da, tid); | 
|  | 4873 |  | 
| Christoph Hellwig | bb8c093 | 2008-01-27 16:41:47 -0800 | [diff] [blame] | 4874 | IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n", | 
| Ron Rindjunsky | fe01b47 | 2008-01-28 14:07:24 +0200 | [diff] [blame] | 4875 | print_mac(mac, da), tid); | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4876 |  | 
|  | 4877 | return 0; | 
|  | 4878 | } | 
|  | 4879 |  | 
| Ron Rindjunsky | 8114fcf | 2008-01-28 14:07:23 +0200 | [diff] [blame] | 4880 | int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, | 
|  | 4881 | enum ieee80211_ampdu_mlme_action action, | 
|  | 4882 | const u8 *addr, u16 tid, u16 *ssn) | 
|  | 4883 | { | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4884 | struct iwl_priv *priv = hw->priv; | 
| Ron Rindjunsky | 8114fcf | 2008-01-28 14:07:23 +0200 | [diff] [blame] | 4885 | int sta_id; | 
|  | 4886 | DECLARE_MAC_BUF(mac); | 
|  | 4887 |  | 
|  | 4888 | IWL_DEBUG_HT("A-MPDU action on da=%s tid=%d ", | 
|  | 4889 | print_mac(mac, addr), tid); | 
|  | 4890 | sta_id = iwl4965_hw_find_station(priv, addr); | 
|  | 4891 | switch (action) { | 
|  | 4892 | case IEEE80211_AMPDU_RX_START: | 
|  | 4893 | IWL_DEBUG_HT("start Rx\n"); | 
|  | 4894 | iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, *ssn); | 
|  | 4895 | break; | 
|  | 4896 | case IEEE80211_AMPDU_RX_STOP: | 
|  | 4897 | IWL_DEBUG_HT("stop Rx\n"); | 
|  | 4898 | iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid); | 
|  | 4899 | break; | 
|  | 4900 | case IEEE80211_AMPDU_TX_START: | 
|  | 4901 | IWL_DEBUG_HT("start Tx\n"); | 
|  | 4902 | return iwl4965_mac_ht_tx_agg_start(hw, addr, tid, ssn); | 
|  | 4903 | case IEEE80211_AMPDU_TX_STOP: | 
|  | 4904 | IWL_DEBUG_HT("stop Tx\n"); | 
|  | 4905 | return iwl4965_mac_ht_tx_agg_stop(hw, addr, tid); | 
|  | 4906 | default: | 
|  | 4907 | IWL_DEBUG_HT("unknown\n"); | 
|  | 4908 | return -EINVAL; | 
|  | 4910 | } | 
|  | 4911 | return 0; | 
|  | 4912 | } | 
|  | 4913 |  | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 4914 | #endif /* CONFIG_IWL4965_HT */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4915 |  | 
|  | 4916 | /* Set up 4965-specific Rx frame reply handlers */ | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4917 | void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4918 | { | 
|  | 4919 | /* Legacy Rx frames */ | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 4920 | priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx; | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4921 |  | 
|  | 4922 | /* High-throughput (HT) Rx frames */ | 
|  | 4923 | priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy; | 
|  | 4924 | priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx; | 
|  | 4925 |  | 
|  | 4926 | priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = | 
|  | 4927 | iwl4965_rx_missed_beacon_notif; | 
|  | 4928 |  | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 4929 | #ifdef CONFIG_IWL4965_HT | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4930 | priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba; | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 4931 | #endif /* CONFIG_IWL4965_HT */ | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4932 | } | 
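The handler table set up above is a simple dispatch-by-command-ID pattern; the following standalone sketch uses placeholder IDs and handlers (not the driver's) to show the idea.

#include <stdio.h>

#define EX_MAX_CMD 4	/* placeholder for the command-ID space */

typedef void (*rx_handler_t)(const char *payload);

static void handle_rx(const char *payload)     { printf("rx: %s\n", payload); }
static void handle_missed(const char *payload) { printf("missed beacons: %s\n", payload); }

static void dispatch(rx_handler_t *handlers, unsigned int cmd, const char *payload)
{
	if (cmd < EX_MAX_CMD && handlers[cmd])
		handlers[cmd](payload);	/* route by command ID */
}

int main(void)
{
	rx_handler_t handlers[EX_MAX_CMD] = { 0 };

	handlers[1] = handle_rx;	/* stands in for REPLY_RX */
	handlers[2] = handle_missed;	/* stands in for MISSED_BEACONS_NOTIFICATION */

	dispatch(handlers, 1, "data frame");
	dispatch(handlers, 2, "5 in a row");
	dispatch(handlers, 3, "ignored");	/* no handler registered */
	return 0;
}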
|  | 4933 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4934 | void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4935 | { | 
|  | 4936 | INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); | 
| Christoph Hellwig | c8b0e6e | 2007-10-25 17:15:51 +0800 | [diff] [blame] | 4937 | #ifdef CONFIG_IWL4965_SENSITIVITY | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4938 | INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work); | 
|  | 4939 | #endif | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4940 | init_timer(&priv->statistics_periodic); | 
|  | 4941 | priv->statistics_periodic.data = (unsigned long)priv; | 
|  | 4942 | priv->statistics_periodic.function = iwl4965_bg_statistics_periodic; | 
|  | 4943 | } | 
|  | 4944 |  | 
| Tomas Winkler | c79dd5b | 2008-03-12 16:58:50 -0700 | [diff] [blame] | 4945 | void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv) | 
| Zhu Yi | b481de9 | 2007-09-25 17:54:57 -0700 | [diff] [blame] | 4946 | { | 
|  | 4947 | del_timer_sync(&priv->statistics_periodic); | 
|  | 4948 |  | 
|  | 4949 | cancel_delayed_work(&priv->init_alive_start); | 
|  | 4950 | } | 
|  | 4951 |  | 
| Tomas Winkler | 3c424c2 | 2008-04-15 16:01:42 -0700 | [diff] [blame] | 4952 |  | 
|  | 4953 | static struct iwl_hcmd_ops iwl4965_hcmd = { | 
| Tomas Winkler | 7e8c519 | 2008-04-15 16:01:43 -0700 | [diff] [blame] | 4954 | .rxon_assoc = iwl4965_send_rxon_assoc, | 
| Tomas Winkler | 3c424c2 | 2008-04-15 16:01:42 -0700 | [diff] [blame] | 4955 | }; | 
|  | 4956 |  | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 4957 | static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { | 
|  | 4958 | .enqueue_hcmd = iwl4965_enqueue_hcmd, | 
|  | 4959 | }; | 
|  | 4960 |  | 
| Assaf Krauss | 6bc913b | 2008-03-11 16:17:18 -0700 | [diff] [blame] | 4961 | static struct iwl_lib_ops iwl4965_lib = { | 
| Assaf Krauss | bf85ea4 | 2008-03-14 10:38:49 -0700 | [diff] [blame] | 4962 | .init_drv = iwl4965_init_drv, | 
| Tomas Winkler | 5425e49 | 2008-04-15 16:01:38 -0700 | [diff] [blame] | 4963 | .set_hw_params = iwl4965_hw_set_hw_params, | 
| Tomas Winkler | e2a722e | 2008-04-14 21:16:10 -0700 | [diff] [blame] | 4964 | .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, | 
| Tomas Winkler | 57aab75 | 2008-04-14 21:16:03 -0700 | [diff] [blame] | 4965 | .hw_nic_init = iwl4965_hw_nic_init, | 
|  | 4966 | .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr, | 
|  | 4967 | .alive_notify = iwl4965_alive_notify, | 
|  | 4968 | .load_ucode = iwl4965_load_bsm, | 
| Assaf Krauss | 6bc913b | 2008-03-11 16:17:18 -0700 | [diff] [blame] | 4969 | .eeprom_ops = { | 
|  | 4970 | .verify_signature  = iwlcore_eeprom_verify_signature, | 
|  | 4971 | .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, | 
|  | 4972 | .release_semaphore = iwlcore_eeprom_release_semaphore, | 
|  | 4973 | }, | 
| Mohamed Abbas | ad97edd | 2008-03-28 16:21:06 -0700 | [diff] [blame] | 4974 | .radio_kill_sw = iwl4965_radio_kill_sw, | 
| Assaf Krauss | 6bc913b | 2008-03-11 16:17:18 -0700 | [diff] [blame] | 4975 | }; | 
|  | 4976 |  | 
|  | 4977 | static struct iwl_ops iwl4965_ops = { | 
|  | 4978 | .lib = &iwl4965_lib, | 
| Tomas Winkler | 3c424c2 | 2008-04-15 16:01:42 -0700 | [diff] [blame] | 4979 | .hcmd = &iwl4965_hcmd, | 
| Tomas Winkler | 857485c | 2008-03-21 13:53:44 -0700 | [diff] [blame] | 4980 | .utils = &iwl4965_hcmd_utils, | 
| Assaf Krauss | 6bc913b | 2008-03-11 16:17:18 -0700 | [diff] [blame] | 4981 | }; | 
|  | 4982 |  | 
| Ron Rindjunsky | fed9017 | 2008-04-15 16:01:41 -0700 | [diff] [blame] | 4983 | struct iwl_cfg iwl4965_agn_cfg = { | 
| Tomas Winkler | 82b9a12 | 2008-03-04 18:09:30 -0800 | [diff] [blame] | 4984 | .name = "4965AGN", | 
| Tomas Winkler | 4bf775c | 2008-03-04 18:09:31 -0800 | [diff] [blame] | 4985 | .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode", | 
| Tomas Winkler | 82b9a12 | 2008-03-04 18:09:30 -0800 | [diff] [blame] | 4986 | .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, | 
| Assaf Krauss | 6bc913b | 2008-03-11 16:17:18 -0700 | [diff] [blame] | 4987 | .ops = &iwl4965_ops, | 
| Assaf Krauss | 1ea8739 | 2008-03-18 14:57:50 -0700 | [diff] [blame] | 4988 | .mod_params = &iwl4965_mod_params, | 
| Tomas Winkler | 82b9a12 | 2008-03-04 18:09:30 -0800 | [diff] [blame] | 4989 | }; | 
|  | 4990 |  | 
| Assaf Krauss | 1ea8739 | 2008-03-18 14:57:50 -0700 | [diff] [blame] | 4991 | module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444); | 
|  | 4992 | MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); | 
|  | 4993 | module_param_named(disable, iwl4965_mod_params.disable, int, 0444); | 
|  | 4994 | MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); | 
| Emmanuel Grumbach | fcc76c6 | 2008-04-15 16:01:47 -0700 | [diff] [blame] | 4995 | module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444); | 
|  | 4996 | MODULE_PARM_DESC(swcrypto, "use software crypto (default 0 [hardware])"); | 
| Assaf Krauss | 1ea8739 | 2008-03-18 14:57:50 -0700 | [diff] [blame] | 4997 | module_param_named(debug, iwl4965_mod_params.debug, int, 0444); | 
|  | 4998 | MODULE_PARM_DESC(debug, "debug output mask"); | 
|  | 4999 | module_param_named( | 
|  | 5000 | disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444); | 
|  | 5001 | MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); | 
|  | 5002 |  | 
|  | 5003 | module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444); | 
|  | 5004 | MODULE_PARM_DESC(queues_num, "number of hw queues."); | 
|  | 5005 |  | 
|  | 5006 | /* QoS */ | 
|  | 5007 | module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444); | 
|  | 5008 | MODULE_PARM_DESC(qos_enable, "enable all QoS functionality"); | 
|  | 5009 | module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444); | 
|  | 5010 | MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); | 
|  | 5011 |  |