/*
 * This file is part of wl1271
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include "reg.h"
#include "ps.h"
#include "io.h"
#include "tx.h"
#include "debug.h"

#define WL1271_WAKEUP_TIMEOUT 500

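/*
 * Delayed work that actually puts the chip into ELP (extremely low
 * power). We only enter ELP if it is still requested, we are not
 * already in it, and no vif forbids it: an AP role, or an in-use
 * station that is not in PSM, keeps the chip awake.
 */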
void wl1271_elp_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct wl12xx_vif *wlvif;

	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1271, elp_work);

	wl1271_debug(DEBUG_PSM, "elp work");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	/* our work might have been already cancelled */
	if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
		goto out;

	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
			goto out;

		if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
			goto out;
	}

	wl1271_debug(DEBUG_PSM, "chip to elp");
	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);

out:
	mutex_unlock(&wl->mutex);
}

#define ELP_ENTRY_DELAY 5

/* Routines to toggle sleep mode while in ELP */
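/*
 * Request ELP entry. Marks ELP as requested and, unless some vif needs
 * the chip awake (an AP role, or an in-use station not in PSM),
 * schedules elp_work after a short delay instead of sleeping
 * immediately.
 */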
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;

	/* we shouldn't get consecutive sleep requests */
	if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
		return;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
			return;

		if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
			return;
	}

	ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
				     msecs_to_jiffies(ELP_ENTRY_DELAY));
}

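/*
 * Wake the chip up from ELP. Clears the pending ELP request, kicks the
 * ELP control register and then waits for the firmware wakeup
 * completion, unless the IRQ handler is already running (the wait is
 * skipped in that case). A wakeup timeout triggers recovery work.
 */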
int wl1271_ps_elp_wakeup(struct wl1271 *wl)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	unsigned long flags;
	int ret;
	u32 start_time = jiffies;
	bool pending = false;

	/*
	 * we might try to wake up even if we didn't go to sleep
	 * before (e.g. on boot)
	 */
	if (!test_and_clear_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))
		return 0;

	/* don't cancel_sync as it might contend for a mutex and deadlock */
	cancel_delayed_work(&wl->elp_work);

	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
		return 0;

	wl1271_debug(DEBUG_PSM, "waking up chip from elp");

	/*
	 * The spinlock is required here to synchronize both the work and
	 * the completion variable in one entity.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
		pending = true;
	else
		wl->elp_compl = &compl;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);

	if (!pending) {
		ret = wait_for_completion_timeout(
			&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
		if (ret == 0) {
			wl1271_error("ELP wakeup timeout!");
			wl12xx_queue_recovery_work(wl);
			ret = -ETIMEDOUT;
			goto err;
		} else if (ret < 0) {
			wl1271_error("ELP wakeup completion error.");
			goto err;
		}
	}

	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);

	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
		     jiffies_to_msecs(jiffies - start_time));
	goto out;

err:
	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->elp_compl = NULL;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
	return ret;

out:
	return 0;
}

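/*
 * Switch a vif between station power-save and active mode. Entering
 * PSM first programs the wake-up conditions; leaving it also disables
 * beacon early termination on the 2.4 GHz band. The WLVIF_FLAG_PSM
 * flag tracks the resulting state.
 */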
int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		       enum wl1271_cmd_ps_mode mode, u32 rates, bool send)
{
	int ret;

	switch (mode) {
	case STATION_POWER_SAVE_MODE:
		wl1271_debug(DEBUG_PSM, "entering psm");

		ret = wl1271_acx_wake_up_conditions(wl, wlvif);
		if (ret < 0) {
			wl1271_error("couldn't set wake up conditions");
			return ret;
		}

		ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
		if (ret < 0)
			return ret;

		set_bit(WLVIF_FLAG_PSM, &wlvif->flags);
		break;
	case STATION_ACTIVE_MODE:
	default:
		wl1271_debug(DEBUG_PSM, "leaving psm");

		/* disable beacon early termination */
		if (wlvif->band == IEEE80211_BAND_2GHZ) {
			ret = wl1271_acx_bet_enable(wl, wlvif, false);
			if (ret < 0)
				return ret;
		}

		ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_ACTIVE_MODE);
		if (ret < 0)
			return ret;

		clear_bit(WLVIF_FLAG_PSM, &wlvif->flags);
		break;
	}

	return ret;
}

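/*
 * Flush every frame queued in the low-level TX queues of the given
 * link and report it back to mac80211 as filtered, then fix up the
 * per-queue counters and the TX low watermark.
 */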
static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;
	unsigned long flags;
	int filtered[NUM_TX_QUEUES];

	/* filter all frames currently in the low level queues for this hlid */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		filtered[i] = 0;
		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
			filtered[i]++;

			if (WARN_ON(wl12xx_is_dummy_packet(wl, skb)))
				continue;

			info = IEEE80211_SKB_CB(skb);
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			info->status.rates[0].idx = -1;
			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++)
		wl->tx_queue_count[i] -= filtered[i];
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}

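/*
 * Notify mac80211 that the station mapped to @hlid entered power save
 * and, if requested, filter the frames already queued for that link.
 */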
void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u8 hlid, bool clean_queues)
{
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (test_bit(hlid, &wl->ap_ps_map))
		return;

	wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d "
		     "clean_queues %d", hlid, wl->links[hlid].allocated_pkts,
		     clean_queues);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
	if (!sta) {
		wl1271_error("could not find sta %pM for starting ps",
			     wl->links[hlid].addr);
		rcu_read_unlock();
		return;
	}

	ieee80211_sta_ps_transition_ni(sta, true);
	rcu_read_unlock();

	/* do we want to filter all frames from this link's queues? */
	if (clean_queues)
		wl1271_ps_filter_frames(wl, hlid);

	__set_bit(hlid, &wl->ap_ps_map);
}

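/*
 * Notify mac80211 that the station mapped to @hlid exited power save.
 */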
void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (!test_bit(hlid, &wl->ap_ps_map))
		return;

	wl1271_debug(DEBUG_PSM, "end mac80211 PSM on hlid %d", hlid);

	__clear_bit(hlid, &wl->ap_ps_map);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
	if (!sta) {
		wl1271_error("could not find sta %pM for ending ps",
			     wl->links[hlid].addr);
		goto end;
	}

	ieee80211_sta_ps_transition_ni(sta, false);
end:
	rcu_read_unlock();
}