blob: 8dc129499b900a5d531608141a1c08f7f09622a3 [file] [log] [blame]
Tomas Winklera55360e2008-05-05 10:22:28 +08001/******************************************************************************
2 *
Reinette Chatre1f447802010-01-15 13:43:41 -08003 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
Tomas Winklera55360e2008-05-05 10:22:28 +08004 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
Winkler, Tomas759ef892008-12-09 11:28:58 -080025 * Intel Linux Wireless <ilw@linux.intel.com>
Tomas Winklera55360e2008-05-05 10:22:28 +080026 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
Emmanuel Grumbach1781a072008-06-30 17:23:09 +080030#include <linux/etherdevice.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090031#include <linux/slab.h>
Tomas Winklera55360e2008-05-05 10:22:28 +080032#include <net/mac80211.h>
Tomas Winklera05ffd32008-07-10 14:28:42 +030033#include <asm/unaligned.h>
Tomas Winklera55360e2008-05-05 10:22:28 +080034#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
Stanislaw Gruszka67289942011-02-28 14:33:17 +010040#include "iwl-agn-calib.h"
Stanislaw Gruszka466a19a2011-03-04 17:51:49 +010041#include "iwl-agn.h"
42
43/******************************************************************************
44 *
45 * RX path functions
46 *
47 ******************************************************************************/
48
Tomas Winklera55360e2008-05-05 10:22:28 +080049/*
50 * Rx theory of operation
51 *
52 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
53 * each of which point to Receive Buffers to be filled by the NIC. These get
54 * used not only for Rx frames, but for any command response or notification
55 * from the NIC. The driver and NIC manage the Rx buffers by means
56 * of indexes into the circular buffer.
57 *
58 * Rx Queue Indexes
59 * The host/firmware share two index registers for managing the Rx buffers.
60 *
61 * The READ index maps to the first position that the firmware may be writing
62 * to -- the driver can read up to (but not including) this position and get
63 * good data.
64 * The READ index is managed by the firmware once the card is enabled.
65 *
66 * The WRITE index maps to the last position the driver has read from -- the
67 * position preceding WRITE is the last slot the firmware can place a packet.
68 *
69 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
70 * WRITE = READ.
71 *
72 * During initialization, the host sets up the READ queue position to the first
73 * INDEX position, and WRITE to the last (READ - 1 wrapped)
74 *
75 * When the firmware places a packet in a buffer, it will advance the READ index
76 * and fire the RX interrupt. The driver can then query the READ index and
77 * process as many packets as possible, moving the WRITE index forward as it
78 * resets the Rx queue buffers with new memory.
79 *
80 * The management in the driver is as follows:
81 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
82 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
83 * to replenish the iwl->rxq->rx_free.
84 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
85 * iwl->rxq is replenished and the READ INDEX is updated (updating the
86 * 'processed' and 'read' driver indexes as well)
87 * + A received packet is processed and handed to the kernel network stack,
88 * detached from the iwl->rxq. The driver 'processed' index is updated.
89 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
90 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
91 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
92 * were enough free buffers and RX_STALLED is set it is cleared.
93 *
94 *
95 * Driver sequence:
96 *
97 * iwl_rx_queue_alloc() Allocates rx_free
98 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
99 * iwl_rx_queue_restock
100 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
101 * queue, updates firmware pointers, and updates
102 * the WRITE index. If insufficient rx_free buffers
103 * are available, schedules iwl_rx_replenish
104 *
105 * -- enable interrupts --
106 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
107 * READ INDEX, detaching the SKB from the pool.
108 * Moves the packet buffer from queue to rx_used.
109 * Calls iwl_rx_queue_restock to refill any empty
110 * slots.
111 * ...
112 *
113 */
114
115/**
116 * iwl_rx_queue_space - Return number of free slots available in queue.
117 */
118int iwl_rx_queue_space(const struct iwl_rx_queue *q)
119{
120 int s = q->read - q->write;
121 if (s <= 0)
122 s += RX_QUEUE_SIZE;
123 /* keep some buffer to not confuse full and empty queue */
124 s -= 2;
125 if (s < 0)
126 s = 0;
127 return s;
128}
Tomas Winklera55360e2008-05-05 10:22:28 +0800129
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 *
 * Publishes q->write (rounded down to a multiple of 8, as the device
 * requires) to the hardware Rx write-pointer register.  Does nothing
 * unless q->need_update is set.  q->lock is held for the duration.
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled: plain write32 suffices, no
		 * wakeup handshake needed */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
	} else {
		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				/* MAC is asleep: request access and bail out
				 * without clearing need_update, so the write
				 * is retried on the next call */
				IWL_DEBUG_INFO(priv,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
Tomas Winklera55360e2008-05-05 10:22:28 +0800180
181int iwl_rx_queue_alloc(struct iwl_priv *priv)
182{
183 struct iwl_rx_queue *rxq = &priv->rxq;
Stanislaw Gruszkaf36d04a2010-02-10 05:07:45 -0800184 struct device *dev = &priv->pci_dev->dev;
Tomas Winklera55360e2008-05-05 10:22:28 +0800185 int i;
186
187 spin_lock_init(&rxq->lock);
188 INIT_LIST_HEAD(&rxq->rx_free);
189 INIT_LIST_HEAD(&rxq->rx_used);
190
191 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
Emmanuel Grumbachd5b25c92010-06-07 13:21:46 -0700192 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
Stanislaw Gruszkaf36d04a2010-02-10 05:07:45 -0800193 GFP_KERNEL);
Tomas Winklera55360e2008-05-05 10:22:28 +0800194 if (!rxq->bd)
Winkler, Tomas8d864222008-11-07 09:58:39 -0800195 goto err_bd;
196
Stanislaw Gruszkaf36d04a2010-02-10 05:07:45 -0800197 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
198 &rxq->rb_stts_dma, GFP_KERNEL);
Winkler, Tomas8d864222008-11-07 09:58:39 -0800199 if (!rxq->rb_stts)
200 goto err_rb;
Tomas Winklera55360e2008-05-05 10:22:28 +0800201
202 /* Fill the rx_used queue with _all_ of the Rx buffers */
203 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
204 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
205
206 /* Set us so that we have processed and used all buffers, but have
207 * not restocked the Rx queue with fresh buffers */
208 rxq->read = rxq->write = 0;
Mohamed Abbas4752c932009-05-22 11:01:51 -0700209 rxq->write_actual = 0;
Tomas Winklera55360e2008-05-05 10:22:28 +0800210 rxq->free_count = 0;
211 rxq->need_update = 0;
212 return 0;
Winkler, Tomas8d864222008-11-07 09:58:39 -0800213
214err_rb:
Stanislaw Gruszkaf36d04a2010-02-10 05:07:45 -0800215 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
Emmanuel Grumbachd5b25c92010-06-07 13:21:46 -0700216 rxq->bd_dma);
Winkler, Tomas8d864222008-11-07 09:58:39 -0800217err_bd:
218 return -ENOMEM;
Tomas Winklera55360e2008-05-05 10:22:28 +0800219}
Tomas Winklera55360e2008-05-05 10:22:28 +0800220
Stanislaw Gruszka466a19a2011-03-04 17:51:49 +0100221/******************************************************************************
222 *
223 * Generic RX handler implementations
224 *
225 ******************************************************************************/
226
/*
 * Handle the uCode ALIVE notification: snapshot the alive response into
 * priv (init vs runtime image, keyed on ver_subtype) and schedule the
 * matching alive_start work.  On a bad status, trigger a restart.
 */
static void iwl_rx_reply_alive(struct iwl_priv *priv,
			       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
		       "0x%01X 0x%01X\n",
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
		memcpy(&priv->card_alive_init,
		       &pkt->u.alive_frame,
		       sizeof(struct iwl_init_alive_resp));
		pwork = &priv->init_alive_start;
	} else {
		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
		memcpy(&priv->card_alive, &pkt->u.alive_frame,
		       sizeof(struct iwl_alive_resp));
		pwork = &priv->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(priv->workqueue, pwork,
				   msecs_to_jiffies(5));
	else {
		IWL_WARN(priv, "%s uCode did not respond OK.\n",
			 (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
			 "init" : "runtime");
		/*
		 * If the init uCode failed to load, try to load it again.
		 * We should not get into this situation, but if it does
		 * happen, we must not move on to loading the "runtime"
		 * image without properly calibrating the device first.
		 */
		if (palive->ver_subtype == INITIALIZE_SUBTYPE)
			priv->ucode_type = UCODE_NONE;
		queue_work(priv->workqueue, &priv->restart);
	}
}
275
276static void iwl_rx_reply_error(struct iwl_priv *priv,
277 struct iwl_rx_mem_buffer *rxb)
278{
279 struct iwl_rx_packet *pkt = rxb_addr(rxb);
280
281 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
282 "seq 0x%04X ser 0x%08X\n",
283 le32_to_cpu(pkt->u.err_resp.error_type),
284 get_cmd_string(pkt->u.err_resp.cmd_id),
285 pkt->u.err_resp.cmd_id,
286 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
287 le32_to_cpu(pkt->u.err_resp.error_info));
288}
289
/*
 * Handle a channel-switch-announcement notification: if a switch was in
 * progress and the uCode reports success for the expected channel, commit
 * the new channel to the active/staging RXON and complete the switch;
 * otherwise report the switch as failed.
 */
static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
	/*
	 * MULTI-FIXME
	 * See iwl_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;

	if (priv->switch_rxon.switch_in_progress) {
		/* status == 0 means the uCode accepted the switch */
		if (!le32_to_cpu(csa->status) &&
		    (csa->channel == priv->switch_rxon.channel)) {
			rxon->channel = csa->channel;
			ctx->staging.channel = csa->channel;
			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
				      le16_to_cpu(csa->channel));
			iwl_chswitch_done(priv, true);
		} else {
			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
				le16_to_cpu(csa->channel));
			iwl_chswitch_done(priv, false);
		}
	}
}
316
317
318static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
Reinette Chatre81963d62010-01-22 14:22:57 -0800319 struct iwl_rx_mem_buffer *rxb)
320{
321 struct iwl_rx_packet *pkt = rxb_addr(rxb);
322 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
323
324 if (!report->state) {
325 IWL_DEBUG_11H(priv,
326 "Spectrum Measure Notification: Start\n");
327 return;
328 }
329
330 memcpy(&priv->measure_report, report, sizeof(*report));
331 priv->measurement_status |= MEASUREMENT_READY;
332}
Reinette Chatre81963d62010-01-22 14:22:57 -0800333
/* Debug-only trace of a uCode sleep notification; no state is changed. */
static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *packet = rxb_addr(rxb);
	struct iwl_sleep_notification *notif = &(packet->u.sleep_notif);

	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     notif->pm_sleep_mode, notif->pm_wakeup_src);
#endif
}
344
345static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
346 struct iwl_rx_mem_buffer *rxb)
347{
348 struct iwl_rx_packet *pkt = rxb_addr(rxb);
349 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
350 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
351 "notification for %s:\n", len,
352 get_cmd_string(pkt->hdr.cmd));
353 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
354}
355
/*
 * Handle a beacon notification: record the IBSS manager status and, unless
 * the driver is shutting down, schedule the beacon_update work.  The debug
 * trace of the beacon TX status is compiled in only with IWLWIFI_DEBUG.
 */
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_DEBUG
	u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
		     "tsf:0x%.8x%.8x rate:%d\n",
		     status & TX_STATUS_MSK,
		     beacon->beacon_notify_hdr.failure_frame,
		     le32_to_cpu(beacon->ibss_mgr_status),
		     le32_to_cpu(beacon->high_tsf),
		     le32_to_cpu(beacon->low_tsf), rate);
#endif

	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
		queue_work(priv->workqueue, &priv->beacon_update);
}
379
/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
#define ACK_CNT_RATIO (50)
#define BA_TIMEOUT_CNT (5)
#define BA_TIMEOUT_MAX (16)

/**
 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
 *
 * When the ACK count ratio is low and aggregated BA timeout retries exceed
 * BA_TIMEOUT_MAX, report bad health (false) so the caller can reload the
 * firmware and bring the system back to a normal operation state.
 */
static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
{
	int actual_delta, expected_delta, ba_timeout_delta;
	struct statistics_tx *cur, *old;

	/* Skip the check entirely while aggregation sessions are active */
	if (priv->_agn.agg_tids_count)
		return true;

	/* Pick the BT or the plain statistics block, matching the packet */
	if (iwl_bt_statistics(priv)) {
		cur = &pkt->u.stats_bt.tx;
		old = &priv->_agn.statistics_bt.tx;
	} else {
		cur = &pkt->u.stats.tx;
		old = &priv->_agn.statistics.tx;
	}

	/* Deltas since the previously cached statistics snapshot */
	actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
		       le32_to_cpu(old->actual_ack_cnt);
	expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
			 le32_to_cpu(old->expected_ack_cnt);

	/* Values should not be negative, but we do not trust the firmware */
	if (actual_delta <= 0 || expected_delta <= 0)
		return true;

	ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
			   le32_to_cpu(old->agg.ba_timeout);

	/* Unhealthy only if the ACK ratio is below threshold AND BA
	 * timeouts are accumulating */
	if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
	    ba_timeout_delta > BA_TIMEOUT_CNT) {
		IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
				actual_delta, expected_delta, ba_timeout_delta);

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/*
		 * This is ifdef'ed on DEBUGFS because otherwise the
		 * statistics aren't available. If DEBUGFS is set but
		 * DEBUG is not, these will just compile out.
		 */
		IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
				priv->_agn.delta_statistics.tx.rx_detected_cnt);
		IWL_DEBUG_RADIO(priv,
				"ack_or_ba_timeout_collision delta %d\n",
				priv->_agn.delta_statistics.tx.ack_or_ba_timeout_collision);
#endif

		if (ba_timeout_delta >= BA_TIMEOUT_MAX)
			return false;
	}

	return true;
}
444
/**
 * iwl_good_plcp_health - checks for plcp error.
 *
 * When the plcp error rate exceeds the configured threshold, report bad
 * health (false) so the caller can reset the radio to improve throughput.
 */
static bool iwl_good_plcp_health(struct iwl_priv *priv,
				 struct iwl_rx_packet *pkt)
{
	bool rc = true;
	int combined_plcp_delta;
	unsigned int plcp_msec;
	unsigned long plcp_received_jiffies;

	/* Threshold can be disabled entirely via config */
	if (priv->cfg->base_params->plcp_delta_threshold ==
	    IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
		return rc;
	}

	/*
	 * check for plcp_err and trigger radio reset if it exceeds
	 * the plcp error threshold plcp_delta.
	 */
	plcp_received_jiffies = jiffies;
	plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
					(long) priv->plcp_jiffies);
	priv->plcp_jiffies = plcp_received_jiffies;
	/*
	 * check to make sure plcp_msec is not 0 to prevent division
	 * by zero.
	 */
	if (plcp_msec) {
		struct statistics_rx_phy *ofdm;
		struct statistics_rx_ht_phy *ofdm_ht;

		/* Sum OFDM + OFDM-HT plcp error deltas against the cached
		 * statistics, choosing the BT or plain block as appropriate */
		if (iwl_bt_statistics(priv)) {
			ofdm = &pkt->u.stats_bt.rx.ofdm;
			ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht;
			combined_plcp_delta =
			    (le32_to_cpu(ofdm->plcp_err) -
			    le32_to_cpu(priv->_agn.statistics_bt.
					rx.ofdm.plcp_err)) +
			    (le32_to_cpu(ofdm_ht->plcp_err) -
			    le32_to_cpu(priv->_agn.statistics_bt.
					rx.ofdm_ht.plcp_err));
		} else {
			ofdm = &pkt->u.stats.rx.ofdm;
			ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
			combined_plcp_delta =
			    (le32_to_cpu(ofdm->plcp_err) -
			    le32_to_cpu(priv->_agn.statistics.
					rx.ofdm.plcp_err)) +
			    (le32_to_cpu(ofdm_ht->plcp_err) -
			    le32_to_cpu(priv->_agn.statistics.
					rx.ofdm_ht.plcp_err));
		}

		/* Errors-per-100ms compared against the threshold */
		if ((combined_plcp_delta > 0) &&
		    ((combined_plcp_delta * 100) / plcp_msec) >
			priv->cfg->base_params->plcp_delta_threshold) {
			/*
			 * if plcp_err exceed the threshold,
			 * the following data is printed in csv format:
			 *    Text: plcp_err exceeded %d,
			 *    Received ofdm.plcp_err,
			 *    Current ofdm.plcp_err,
			 *    Received ofdm_ht.plcp_err,
			 *    Current ofdm_ht.plcp_err,
			 *    combined_plcp_delta,
			 *    plcp_msec
			 */
			IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
				"%u, %u, %u, %u, %d, %u mSecs\n",
				priv->cfg->base_params->plcp_delta_threshold,
				le32_to_cpu(ofdm->plcp_err),
				le32_to_cpu(ofdm->plcp_err),
				le32_to_cpu(ofdm_ht->plcp_err),
				le32_to_cpu(ofdm_ht->plcp_err),
				combined_plcp_delta, plcp_msec);

			rc = false;
		}
	}
	return rc;
}
531
Stanislaw Gruszka466a19a2011-03-04 17:51:49 +0100532static void iwl_recover_from_statistics(struct iwl_priv *priv,
533 struct iwl_rx_packet *pkt)
Wey-Yi Guyfa8f1302010-03-05 14:22:46 -0800534{
Stanislaw Gruszkab7977ff2011-02-28 14:33:15 +0100535 const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
536
Stanislaw Gruszkaca3d9382011-02-08 09:31:55 +0100537 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
538 !iwl_is_any_associated(priv))
Wey-Yi Guyfa8f1302010-03-05 14:22:46 -0800539 return;
Stanislaw Gruszkaca3d9382011-02-08 09:31:55 +0100540
Stanislaw Gruszkaad6e82a2011-02-28 14:33:16 +0100541 if (mod_params->ack_check && !iwl_good_ack_health(priv, pkt)) {
Stanislaw Gruszkaca3d9382011-02-08 09:31:55 +0100542 IWL_ERR(priv, "low ack count detected, restart firmware\n");
543 if (!iwl_force_reset(priv, IWL_FW_RESET, false))
544 return;
Trieu 'Andrew' Nguyen3e4fb5f2010-01-22 14:22:46 -0800545 }
Stanislaw Gruszkaca3d9382011-02-08 09:31:55 +0100546
Stanislaw Gruszkaad6e82a2011-02-28 14:33:16 +0100547 if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt))
Stanislaw Gruszkaca3d9382011-02-08 09:31:55 +0100548 iwl_force_reset(priv, IWL_RF_RESET, false);
Wey-Yi Guybeac5492010-03-04 13:38:58 -0800549}
Wey-Yi Guybeac5492010-03-04 13:38:58 -0800550
Stanislaw Gruszka67289942011-02-28 14:33:17 +0100551/* Calculate noise level, based on measurements during network silence just
552 * before arriving beacon. This measurement can be done only if we know
553 * exactly when to expect beacons, therefore only when we're associated. */
554static void iwl_rx_calc_noise(struct iwl_priv *priv)
555{
556 struct statistics_rx_non_phy *rx_info;
557 int num_active_rx = 0;
558 int total_silence = 0;
559 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
560 int last_rx_noise;
561
562 if (iwl_bt_statistics(priv))
563 rx_info = &(priv->_agn.statistics_bt.rx.general.common);
564 else
565 rx_info = &(priv->_agn.statistics.rx.general);
566 bcn_silence_a =
567 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
568 bcn_silence_b =
569 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
570 bcn_silence_c =
571 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
572
573 if (bcn_silence_a) {
574 total_silence += bcn_silence_a;
575 num_active_rx++;
576 }
577 if (bcn_silence_b) {
578 total_silence += bcn_silence_b;
579 num_active_rx++;
580 }
581 if (bcn_silence_c) {
582 total_silence += bcn_silence_c;
583 num_active_rx++;
584 }
585
586 /* Average among active antennas */
587 if (num_active_rx)
588 last_rx_noise = (total_silence / num_active_rx) - 107;
589 else
590 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
591
592 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
593 bcn_silence_a, bcn_silence_b, bcn_silence_c,
594 last_rx_noise);
595}
596
/*
 * based on the assumption of all statistics counter are in DWORD
 * FIXME: This function is for debugging, do not deal with
 * the case of counters roll-over.
 *
 * Walks the incoming statistics packet DWORD by DWORD against the
 * previously cached snapshot, maintaining accumulated, per-interval delta
 * and max-delta values for debugfs.  Compiles to a no-op without
 * CONFIG_IWLWIFI_DEBUGFS.
 */
static void iwl_accumulative_statistics(struct iwl_priv *priv,
					__le32 *stats)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct statistics_general_common *general, *accum_general;
	struct statistics_tx *tx, *accum_tx;

	/* Select the BT-coex or plain statistics mirror to accumulate into */
	if (iwl_bt_statistics(priv)) {
		prev_stats = (__le32 *)&priv->_agn.statistics_bt;
		accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
		size = sizeof(struct iwl_bt_notif_statistics);
		general = &priv->_agn.statistics_bt.general.common;
		accum_general = &priv->_agn.accum_statistics_bt.general.common;
		tx = &priv->_agn.statistics_bt.tx;
		accum_tx = &priv->_agn.accum_statistics_bt.tx;
		delta = (u32 *)&priv->_agn.delta_statistics_bt;
		max_delta = (u32 *)&priv->_agn.max_delta_bt;
	} else {
		prev_stats = (__le32 *)&priv->_agn.statistics;
		accum_stats = (u32 *)&priv->_agn.accum_statistics;
		size = sizeof(struct iwl_notif_statistics);
		general = &priv->_agn.statistics.general.common;
		accum_general = &priv->_agn.accum_statistics.general.common;
		tx = &priv->_agn.statistics.tx;
		accum_tx = &priv->_agn.accum_statistics.tx;
		delta = (u32 *)&priv->_agn.delta_statistics;
		max_delta = (u32 *)&priv->_agn.max_delta;
	}
	/* Start past the leading DWORD (the flag field) and step all five
	 * pointers in lock-step; only growing counters are accumulated */
	for (i = sizeof(__le32); i < size;
	     i += sizeof(__le32), stats++, prev_stats++, delta++,
	     max_delta++, accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta = (le32_to_cpu(*stats) -
				le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative statistics for "no-counter" type statistics */
	accum_general->temperature = general->temperature;
	accum_general->temperature_m = general->temperature_m;
	accum_general->ttl_timestamp = general->ttl_timestamp;
	accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
	accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
	accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
#endif
}
Stanislaw Gruszka67289942011-02-28 14:33:17 +0100655
/*
 * Handle a STATISTICS_NOTIFICATION from the uCode: accumulate debugfs
 * statistics, run the ACK/PLCP health recovery checks, cache the new
 * snapshot, rearm the periodic statistics timer, and kick off noise
 * calibration and temperature handling as needed.
 */
static void iwl_rx_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	const int reg_recalib_period = 60;
	int change;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (iwl_bt_statistics(priv)) {
		IWL_DEBUG_RX(priv,
			     "Statistics notification received (%d vs %d).\n",
			     (int)sizeof(struct iwl_bt_notif_statistics),
			     le32_to_cpu(pkt->len_n_flags) &
			     FH_RSCSR_FRAME_SIZE_MSK);

		/* temperature change or HT40-mode flag flip triggers the
		 * temperature callback below */
		change = ((priv->_agn.statistics_bt.general.common.temperature !=
			   pkt->u.stats_bt.general.common.temperature) ||
			  ((priv->_agn.statistics_bt.flag &
			    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
			   (pkt->u.stats_bt.flag &
			    STATISTICS_REPLY_FLG_HT40_MODE_MSK)));

		iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
	} else {
		IWL_DEBUG_RX(priv,
			     "Statistics notification received (%d vs %d).\n",
			     (int)sizeof(struct iwl_notif_statistics),
			     le32_to_cpu(pkt->len_n_flags) &
			     FH_RSCSR_FRAME_SIZE_MSK);

		change = ((priv->_agn.statistics.general.common.temperature !=
			   pkt->u.stats.general.common.temperature) ||
			  ((priv->_agn.statistics.flag &
			    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
			   (pkt->u.stats.flag &
			    STATISTICS_REPLY_FLG_HT40_MODE_MSK)));

		iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
	}

	iwl_recover_from_statistics(priv, pkt);

	/* Cache the fresh snapshot (after the delta computations above) */
	if (iwl_bt_statistics(priv))
		memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
		       sizeof(priv->_agn.statistics_bt));
	else
		memcpy(&priv->_agn.statistics, &pkt->u.stats,
		       sizeof(priv->_agn.statistics));

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * reg_recalib_period seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(reg_recalib_period * 1000));

	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}
	if (priv->cfg->ops->lib->temp_ops.temperature && change)
		priv->cfg->ops->lib->temp_ops.temperature(priv);
}
721
Stanislaw Gruszka466a19a2011-03-04 17:51:49 +0100722static void iwl_rx_reply_statistics(struct iwl_priv *priv,
723 struct iwl_rx_mem_buffer *rxb)
Stanislaw Gruszka67289942011-02-28 14:33:17 +0100724{
725 struct iwl_rx_packet *pkt = rxb_addr(rxb);
726
727 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
728#ifdef CONFIG_IWLWIFI_DEBUGFS
729 memset(&priv->_agn.accum_statistics, 0,
730 sizeof(struct iwl_notif_statistics));
731 memset(&priv->_agn.delta_statistics, 0,
732 sizeof(struct iwl_notif_statistics));
733 memset(&priv->_agn.max_delta, 0,
734 sizeof(struct iwl_notif_statistics));
735 memset(&priv->_agn.accum_statistics_bt, 0,
736 sizeof(struct iwl_bt_notif_statistics));
737 memset(&priv->_agn.delta_statistics_bt, 0,
738 sizeof(struct iwl_bt_notif_statistics));
739 memset(&priv->_agn.max_delta_bt, 0,
740 sizeof(struct iwl_bt_notif_statistics));
741#endif
742 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
743 }
744 iwl_rx_statistics(priv, rxb);
745}
746
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	unsigned long status = priv->status;	/* snapshot for change check */

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		/* Block further commands while any kill reason is active */
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
		if (flags & CT_CARD_DISABLED)
			iwl_tt_enter_ct_kill(priv);
	}
	if (!(flags & CT_CARD_DISABLED))
		iwl_tt_exit_ct_kill(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);


	if (!(flags & RXON_CARD_DISABLED))
		iwl_scan_cancel(priv);

	/* Only notify rfkill when the HW kill state actually changed;
	 * otherwise wake anyone waiting on the command queue */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up_interruptible(&priv->wait_command_queue);
}
799
800static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
801 struct iwl_rx_mem_buffer *rxb)
Stanislaw Gruszka67289942011-02-28 14:33:17 +0100802
803{
804 struct iwl_rx_packet *pkt = rxb_addr(rxb);
805 struct iwl_missed_beacon_notif *missed_beacon;
806
807 missed_beacon = &pkt->u.missed_beacon;
808 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
809 priv->missed_beacon_threshold) {
810 IWL_DEBUG_CALIB(priv,
811 "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
812 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
813 le32_to_cpu(missed_beacon->total_missed_becons),
814 le32_to_cpu(missed_beacon->num_recvd_beacons),
815 le32_to_cpu(missed_beacon->num_expected_beacons));
816 if (!test_bit(STATUS_SCANNING, &priv->status))
817 iwl_init_sensitivity(priv);
818 }
819}
820
Stanislaw Gruszka466a19a2011-03-04 17:51:49 +0100821/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
822 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
823static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
824 struct iwl_rx_mem_buffer *rxb)
825{
826 struct iwl_rx_packet *pkt = rxb_addr(rxb);
827
828 priv->_agn.last_phy_res_valid = true;
829 memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
830 sizeof(struct iwl_rx_phy_res));
831}
832
/*
 * returns non-zero if packet should be dropped
 *
 * Inspect the uCode decryption status of a protected frame; on a good
 * HW decryption set RX_FLAG_DECRYPTED in @stats for mac80211.  Only a
 * WEP frame destroyed by in-place decryption (bad ICV) is dropped (-1).
 *
 * NOTE: the switch below relies on intentional fall-through:
 * TKIP -> WEP -> CCMP share the ICV/decrypt-OK checks.
 */
static int iwl_set_decrypted_flag(struct iwl_priv *priv,
				  struct ieee80211_hdr *hdr,
				  u32 decrypt_res,
				  struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
						RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* Unprotected frames carry no decryption status to translate */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through - TTAK OK, check ICV like WEP */

	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
		/* fall through - check for a successful HW decrypt */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
Stanislaw Gruszka466a19a2011-03-04 17:51:49 +0100884
/*
 * Hand a received frame up to mac80211.
 *
 * The frame payload stays in the RX page buffer; it is attached to a
 * small skb as a page fragment (zero copy).  On success, ownership of
 * rxb->page passes to the skb / mac80211, so the page pointer is
 * cleared and the driver's page accounting is decremented.
 */
static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	/* Small skb: payload is added below as a page fragment */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	iwl_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
	/* page now owned by the skb; drop our reference/accounting */
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}
922
923static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
924{
925 u32 decrypt_out = 0;
926
927 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
928 RX_RES_STATUS_STATION_FOUND)
929 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
930 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
931
932 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
933
934 /* packet was not encrypted */
935 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
936 RX_RES_STATUS_SEC_TYPE_NONE)
937 return decrypt_out;
938
939 /* packet was encrypted with unknown alg */
940 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
941 RX_RES_STATUS_SEC_TYPE_ERR)
942 return decrypt_out;
943
944 /* decryption was not done in HW */
945 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
946 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
947 return decrypt_out;
948
949 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
950
951 case RX_RES_STATUS_SEC_TYPE_CCMP:
952 /* alg is CCM: check MIC only */
953 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
954 /* Bad MIC */
955 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
956 else
957 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
958
959 break;
960
961 case RX_RES_STATUS_SEC_TYPE_TKIP:
962 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
963 /* Bad TTAK */
964 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
965 break;
966 }
967 /* fall through if TTAK OK */
968 default:
969 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
970 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
971 else
972 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
973 break;
974 }
975
976 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
977 decrypt_in, decrypt_out);
978
979 return decrypt_out;
980}
981
982/* Called for REPLY_RX (legacy ABG frames), or
983 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
984static void iwl_rx_reply_rx(struct iwl_priv *priv,
985 struct iwl_rx_mem_buffer *rxb)
986{
987 struct ieee80211_hdr *header;
988 struct ieee80211_rx_status rx_status;
989 struct iwl_rx_packet *pkt = rxb_addr(rxb);
990 struct iwl_rx_phy_res *phy_res;
991 __le32 rx_pkt_status;
992 struct iwl_rx_mpdu_res_start *amsdu;
993 u32 len;
994 u32 ampdu_status;
995 u32 rate_n_flags;
996
997 /**
998 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
999 * REPLY_RX: physical layer info is in this buffer
1000 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
1001 * command and cached in priv->last_phy_res
1002 *
1003 * Here we set up local variables depending on which command is
1004 * received.
1005 */
1006 if (pkt->hdr.cmd == REPLY_RX) {
1007 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
1008 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
1009 + phy_res->cfg_phy_cnt);
1010
1011 len = le16_to_cpu(phy_res->byte_count);
1012 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
1013 phy_res->cfg_phy_cnt + len);
1014 ampdu_status = le32_to_cpu(rx_pkt_status);
1015 } else {
1016 if (!priv->_agn.last_phy_res_valid) {
1017 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
1018 return;
1019 }
1020 phy_res = &priv->_agn.last_phy_res;
1021 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
1022 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
1023 len = le16_to_cpu(amsdu->byte_count);
1024 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
1025 ampdu_status = iwl_translate_rx_status(priv,
1026 le32_to_cpu(rx_pkt_status));
1027 }
1028
1029 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
1030 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
1031 phy_res->cfg_phy_cnt);
1032 return;
1033 }
1034
1035 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
1036 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1037 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
1038 le32_to_cpu(rx_pkt_status));
1039 return;
1040 }
1041
1042 /* This will be used in several places later */
1043 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
1044
1045 /* rx_status carries information about the packet to mac80211 */
1046 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
1047 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
1048 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1049 rx_status.freq =
1050 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
1051 rx_status.band);
1052 rx_status.rate_idx =
1053 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
1054 rx_status.flag = 0;
1055
1056 /* TSF isn't reliable. In order to allow smooth user experience,
1057 * this W/A doesn't propagate it to the mac80211 */
1058 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
1059
1060 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
1061
1062 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1063 rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);
1064
1065 iwl_dbg_log_rx_data_frame(priv, len, header);
1066 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
1067 rx_status.signal, (unsigned long long)rx_status.mactime);
1068
1069 /*
1070 * "antenna number"
1071 *
1072 * It seems that the antenna field in the phy flags value
1073 * is actually a bit field. This is undefined by radiotap,
1074 * it wants an actual antenna number but I always get "7"
1075 * for most legacy frames I receive indicating that the
1076 * same frame was received on all three RX chains.
1077 *
1078 * I think this field should be removed in favor of a
1079 * new 802.11n radiotap field "RX chains" that is defined
1080 * as a bitmask.
1081 */
1082 rx_status.antenna =
1083 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
1084 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
1085
1086 /* set the preamble flag if appropriate */
1087 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1088 rx_status.flag |= RX_FLAG_SHORTPRE;
1089
1090 /* Set up the HT phy flags */
1091 if (rate_n_flags & RATE_MCS_HT_MSK)
1092 rx_status.flag |= RX_FLAG_HT;
1093 if (rate_n_flags & RATE_MCS_HT40_MSK)
1094 rx_status.flag |= RX_FLAG_40MHZ;
1095 if (rate_n_flags & RATE_MCS_SGI_MSK)
1096 rx_status.flag |= RX_FLAG_SHORT_GI;
1097
1098 iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1099 rxb, &rx_status);
1100}
1101
1102/**
1103 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
1104 *
1105 * Setup the RX handlers for each of the reply types sent from the uCode
1106 * to the host.
1107 */
1108void iwl_setup_rx_handlers(struct iwl_priv *priv)
1109{
1110 void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
1111
1112 handlers = priv->rx_handlers;
1113
1114 handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
1115 handlers[REPLY_ERROR] = iwl_rx_reply_error;
1116 handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
1117 handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
1118 handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
1119 handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif;
1120 handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
1121
1122 /*
1123 * The same handler is used for both the REPLY to a discrete
1124 * statistics request from the host as well as for the periodic
1125 * statistics notifications (after received beacons) from the uCode.
1126 */
1127 handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics;
1128 handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
1129
1130 iwl_setup_rx_scan_handlers(priv);
1131
1132 handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
1133 handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif;
1134
1135 /* Rx handlers */
1136 handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
1137 handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
1138
1139 /* block ack */
1140 handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
1141
1142 /* Set up hardware specific Rx handlers */
1143 priv->cfg->ops->lib->rx_handler_setup(priv);
1144}