| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 1 | /* | 
 | 2 |  * This file is part of wl1271 | 
 | 3 |  * | 
 | 4 |  * Copyright (C) 2009 Nokia Corporation | 
 | 5 |  * | 
 | 6 |  * Contact: Luciano Coelho <luciano.coelho@nokia.com> | 
 | 7 |  * | 
 | 8 |  * This program is free software; you can redistribute it and/or | 
 | 9 |  * modify it under the terms of the GNU General Public License | 
 | 10 |  * version 2 as published by the Free Software Foundation. | 
 | 11 |  * | 
 | 12 |  * This program is distributed in the hope that it will be useful, but | 
 | 13 |  * WITHOUT ANY WARRANTY; without even the implied warranty of | 
 | 14 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU | 
 | 15 |  * General Public License for more details. | 
 | 16 |  * | 
 | 17 |  * You should have received a copy of the GNU General Public License | 
 | 18 |  * along with this program; if not, write to the Free Software | 
 | 19 |  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | 
 | 20 |  * 02110-1301 USA | 
 | 21 |  * | 
 | 22 |  */ | 
 | 23 |  | 
 | 24 | #include <linux/kernel.h> | 
 | 25 | #include <linux/module.h> | 
| Arik Nemtsov | c6c8a65 | 2010-10-16 20:27:53 +0200 | [diff] [blame] | 26 | #include <linux/etherdevice.h> | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 27 |  | 
| Shahar Levi | 00d2010 | 2010-11-08 11:20:10 +0000 | [diff] [blame] | 28 | #include "wl12xx.h" | 
 | 29 | #include "io.h" | 
 | 30 | #include "reg.h" | 
 | 31 | #include "ps.h" | 
 | 32 | #include "tx.h" | 
| Arik Nemtsov | 56d4f8f | 2011-08-25 12:43:13 +0300 | [diff] [blame] | 33 | #include "event.h" | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 34 |  | 
| Arik Nemtsov | 7f179b4 | 2010-10-16 21:39:06 +0200 | [diff] [blame] | 35 | static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id) | 
 | 36 | { | 
 | 37 | 	int ret; | 
 | 38 | 	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); | 
 | 39 |  | 
 | 40 | 	if (is_ap) | 
| Eliad Peller | c690ec8 | 2011-08-14 13:17:07 +0300 | [diff] [blame] | 41 | 		ret = wl12xx_cmd_set_default_wep_key(wl, id, | 
| Arik Nemtsov | e51ae9b | 2011-08-14 13:17:21 +0300 | [diff] [blame] | 42 | 						     wl->ap_bcast_hlid); | 
| Arik Nemtsov | 7f179b4 | 2010-10-16 21:39:06 +0200 | [diff] [blame] | 43 | 	else | 
| Eliad Peller | c690ec8 | 2011-08-14 13:17:07 +0300 | [diff] [blame] | 44 | 		ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid); | 
| Arik Nemtsov | 7f179b4 | 2010-10-16 21:39:06 +0200 | [diff] [blame] | 45 |  | 
 | 46 | 	if (ret < 0) | 
 | 47 | 		return ret; | 
 | 48 |  | 
 | 49 | 	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id); | 
 | 50 | 	return 0; | 
 | 51 | } | 
 | 52 |  | 
| Ido Yariv | 25eeb9e | 2010-10-12 16:20:06 +0200 | [diff] [blame] | 53 | static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb) | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 54 | { | 
| Ido Yariv | 25eeb9e | 2010-10-12 16:20:06 +0200 | [diff] [blame] | 55 | 	int id; | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 56 |  | 
| Ido Yariv | 25eeb9e | 2010-10-12 16:20:06 +0200 | [diff] [blame] | 57 | 	id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS); | 
 | 58 | 	if (id >= ACX_TX_DESCRIPTORS) | 
 | 59 | 		return -EBUSY; | 
 | 60 |  | 
 | 61 | 	__set_bit(id, wl->tx_frames_map); | 
 | 62 | 	wl->tx_frames[id] = skb; | 
 | 63 | 	wl->tx_frames_cnt++; | 
 | 64 | 	return id; | 
 | 65 | } | 
 | 66 |  | 
 | 67 | static void wl1271_free_tx_id(struct wl1271 *wl, int id) | 
 | 68 | { | 
 | 69 | 	if (__test_and_clear_bit(id, wl->tx_frames_map)) { | 
| Ido Yariv | ef2e300 | 2011-04-18 16:44:11 +0300 | [diff] [blame] | 70 | 		if (unlikely(wl->tx_frames_cnt == ACX_TX_DESCRIPTORS)) | 
 | 71 | 			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); | 
 | 72 |  | 
| Ido Yariv | 25eeb9e | 2010-10-12 16:20:06 +0200 | [diff] [blame] | 73 | 		wl->tx_frames[id] = NULL; | 
 | 74 | 		wl->tx_frames_cnt--; | 
 | 75 | 	} | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 76 | } | 
 | 77 |  | 
/*
 * Ensure BSSID-based RX filtering is disabled before an authentication
 * frame is transmitted, by starting the device role (and remaining on
 * channel) if it is not already up.
 *
 * NOTE(review): errors from the role-start / ROC commands are
 * deliberately swallowed — the function always returns 0 so that TX
 * proceeds regardless of whether the filter update succeeded.
 */
static int wl1271_tx_update_filters(struct wl1271 *wl,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	int ret;

	hdr = (struct ieee80211_hdr *)skb->data;

	/*
	 * stop bssid-based filtering before transmitting authentication
	 * requests. this way the hw will never drop authentication
	 * responses coming from BSSIDs it isn't familiar with (e.g. on
	 * roaming)
	 */
	if (!ieee80211_is_auth(hdr->frame_control))
		return 0;

	/* the device role is already started - nothing to do */
	if (wl->dev_hlid != WL12XX_INVALID_LINK_ID)
		goto out;

	wl1271_debug(DEBUG_CMD, "starting device role for roaming");
	ret = wl12xx_cmd_role_start_dev(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_roc(wl, wl->dev_role_id);
	if (ret < 0)
		goto out;
out:
	return 0;
}
 | 109 |  | 
| Arik Nemtsov | 99a2775 | 2011-02-23 00:22:25 +0200 | [diff] [blame] | 110 | static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, | 
 | 111 | 						 struct sk_buff *skb) | 
 | 112 | { | 
 | 113 | 	struct ieee80211_hdr *hdr; | 
 | 114 |  | 
 | 115 | 	/* | 
 | 116 | 	 * add the station to the known list before transmitting the | 
 | 117 | 	 * authentication response. this way it won't get de-authed by FW | 
 | 118 | 	 * when transmitting too soon. | 
 | 119 | 	 */ | 
 | 120 | 	hdr = (struct ieee80211_hdr *)(skb->data + | 
 | 121 | 				       sizeof(struct wl1271_tx_hw_descr)); | 
 | 122 | 	if (ieee80211_is_auth(hdr->frame_control)) | 
 | 123 | 		wl1271_acx_set_inconnection_sta(wl, hdr->addr1); | 
 | 124 | } | 
 | 125 |  | 
| Arik Nemtsov | b622d99 | 2011-02-23 00:22:31 +0200 | [diff] [blame] | 126 | static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid) | 
 | 127 | { | 
| Arik Nemtsov | da03209 | 2011-08-25 12:43:15 +0300 | [diff] [blame] | 128 | 	bool fw_ps, single_sta; | 
| Arik Nemtsov | 9b17f1b | 2011-08-14 13:17:35 +0300 | [diff] [blame] | 129 | 	u8 tx_pkts; | 
| Arik Nemtsov | b622d99 | 2011-02-23 00:22:31 +0200 | [diff] [blame] | 130 |  | 
 | 131 | 	/* only regulate station links */ | 
 | 132 | 	if (hlid < WL1271_AP_STA_HLID_START) | 
 | 133 | 		return; | 
 | 134 |  | 
| Arik Nemtsov | 56d4f8f | 2011-08-25 12:43:13 +0300 | [diff] [blame] | 135 | 	if (WARN_ON(!wl1271_is_active_sta(wl, hlid))) | 
 | 136 | 	    return; | 
 | 137 |  | 
| Arik Nemtsov | b622d99 | 2011-02-23 00:22:31 +0200 | [diff] [blame] | 138 | 	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); | 
| Arik Nemtsov | 9b17f1b | 2011-08-14 13:17:35 +0300 | [diff] [blame] | 139 | 	tx_pkts = wl->links[hlid].allocated_pkts; | 
| Arik Nemtsov | da03209 | 2011-08-25 12:43:15 +0300 | [diff] [blame] | 140 | 	single_sta = (wl->active_sta_count == 1); | 
| Arik Nemtsov | b622d99 | 2011-02-23 00:22:31 +0200 | [diff] [blame] | 141 |  | 
 | 142 | 	/* | 
 | 143 | 	 * if in FW PS and there is enough data in FW we can put the link | 
 | 144 | 	 * into high-level PS and clean out its TX queues. | 
| Arik Nemtsov | da03209 | 2011-08-25 12:43:15 +0300 | [diff] [blame] | 145 | 	 * Make an exception if this is the only connected station. In this | 
 | 146 | 	 * case FW-memory congestion is not a problem. | 
| Arik Nemtsov | b622d99 | 2011-02-23 00:22:31 +0200 | [diff] [blame] | 147 | 	 */ | 
| Arik Nemtsov | da03209 | 2011-08-25 12:43:15 +0300 | [diff] [blame] | 148 | 	if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) | 
| Arik Nemtsov | b622d99 | 2011-02-23 00:22:31 +0200 | [diff] [blame] | 149 | 		wl1271_ps_link_start(wl, hlid, true); | 
 | 150 | } | 
 | 151 |  | 
| Arik Nemtsov | f8e0af6 | 2011-08-25 12:43:12 +0300 | [diff] [blame] | 152 | bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) | 
| Eliad Peller | f4df1bd | 2011-08-14 13:17:15 +0300 | [diff] [blame] | 153 | { | 
 | 154 | 	return wl->dummy_packet == skb; | 
 | 155 | } | 
 | 156 |  | 
 | 157 | u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb) | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 158 | { | 
 | 159 | 	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb); | 
 | 160 |  | 
 | 161 | 	if (control->control.sta) { | 
 | 162 | 		struct wl1271_station *wl_sta; | 
 | 163 |  | 
 | 164 | 		wl_sta = (struct wl1271_station *) | 
 | 165 | 				control->control.sta->drv_priv; | 
 | 166 | 		return wl_sta->hlid; | 
 | 167 | 	} else { | 
 | 168 | 		struct ieee80211_hdr *hdr; | 
 | 169 |  | 
| Eliad Peller | f4df1bd | 2011-08-14 13:17:15 +0300 | [diff] [blame] | 170 | 		if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) | 
 | 171 | 			return wl->system_hlid; | 
 | 172 |  | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 173 | 		hdr = (struct ieee80211_hdr *)skb->data; | 
 | 174 | 		if (ieee80211_is_mgmt(hdr->frame_control)) | 
| Arik Nemtsov | e51ae9b | 2011-08-14 13:17:21 +0300 | [diff] [blame] | 175 | 			return wl->ap_global_hlid; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 176 | 		else | 
| Arik Nemtsov | e51ae9b | 2011-08-14 13:17:21 +0300 | [diff] [blame] | 177 | 			return wl->ap_bcast_hlid; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 178 | 	} | 
 | 179 | } | 
 | 180 |  | 
| Eliad Peller | f4df1bd | 2011-08-14 13:17:15 +0300 | [diff] [blame] | 181 | static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb) | 
 | 182 | { | 
| Eliad Peller | df4c849 | 2011-09-15 16:05:47 +0300 | [diff] [blame] | 183 | 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 
 | 184 |  | 
| Eliad Peller | f4df1bd | 2011-08-14 13:17:15 +0300 | [diff] [blame] | 185 | 	if (wl12xx_is_dummy_packet(wl, skb)) | 
 | 186 | 		return wl->system_hlid; | 
 | 187 |  | 
 | 188 | 	if (wl->bss_type == BSS_TYPE_AP_BSS) | 
 | 189 | 		return wl12xx_tx_get_hlid_ap(wl, skb); | 
 | 190 |  | 
| Eliad Peller | df4c849 | 2011-09-15 16:05:47 +0300 | [diff] [blame] | 191 | 	wl1271_tx_update_filters(wl, skb); | 
 | 192 |  | 
 | 193 | 	if ((test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) || | 
 | 194 | 	     test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags)) && | 
 | 195 | 	    !ieee80211_is_auth(hdr->frame_control) && | 
 | 196 | 	    !ieee80211_is_assoc_req(hdr->frame_control)) | 
| Eliad Peller | f4df1bd | 2011-08-14 13:17:15 +0300 | [diff] [blame] | 197 | 		return wl->sta_hlid; | 
 | 198 | 	else | 
 | 199 | 		return wl->dev_hlid; | 
 | 200 | } | 
 | 201 |  | 
| Ido Yariv | 0da13da | 2011-03-31 10:06:58 +0200 | [diff] [blame] | 202 | static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl, | 
 | 203 | 						unsigned int packet_length) | 
 | 204 | { | 
 | 205 | 	if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT) | 
 | 206 | 		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); | 
 | 207 | 	else | 
 | 208 | 		return ALIGN(packet_length, WL1271_TX_ALIGN_TO); | 
 | 209 | } | 
 | 210 |  | 
/*
 * Reserve firmware resources (a descriptor id and memory blocks) for
 * transmitting @skb, and push room for the TX HW descriptor onto the
 * skb.
 *
 * @extra:      bytes of extra security-header space included in the frame
 * @buf_offset: bytes already occupied in the aggregation buffer
 * @hlid:       destination host link id (used for AP per-link accounting)
 *
 * Returns 0 on success; -EAGAIN when the aggregation buffer cannot hold
 * the frame (caller should flush and retry); -EBUSY when no descriptor
 * id or not enough FW memory blocks are available. On failure nothing
 * is left allocated.
 */
static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
				u32 buf_offset, u8 hlid)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 len;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks = wl->tx_spare_blocks;

	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
		return -EAGAIN;

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	/* approximate the number of blocks required for this packet
	   in the firmware */
	len = wl12xx_calc_packet_alignment(wl, total_len);

	/* in case of a dummy packet, use default amount of spare mem blocks */
	if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
		spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;

	/* round up to whole HW blocks, plus the spare blocks */
	total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
		spare_blocks;

	if (total_blocks <= wl->tx_blocks_available) {
		/* make room for the TX HW descriptor in front of the frame */
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		/* HW descriptor fields change between wl127x and wl128x */
		if (wl->chip.id == CHIP_ID_1283_PG20) {
			desc->wl128x_mem.total_mem_blocks = total_blocks;
		} else {
			desc->wl127x_mem.extra_blocks = spare_blocks;
			desc->wl127x_mem.total_mem_blocks = total_blocks;
		}

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/* per-AC accounting, used by TX queue selection */
		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		/* per-link accounting, used for AP link regulation */
		if (wl->bss_type == BSS_TYPE_AP_BSS &&
		    hlid >= WL1271_AP_STA_HLID_START)
			wl->links[hlid].allocated_pkts++;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		/* not enough FW memory blocks - release the descriptor id */
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}
 | 275 |  | 
/*
 * Fill in the TX HW descriptor that wl1271_tx_allocate() pushed onto
 * the skb: security-header relocation, packet lifetime, TID, session
 * counter, rate policy and (chip-dependent) length/padding fields.
 *
 * @extra:   bytes of security-header space to collapse out of the frame
 * @control: mac80211 TX info for the frame
 * @hlid:    destination host link id, stored in the descriptor
 */
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
			      u32 extra, struct ieee80211_tx_info *control,
			      u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int aligned_len, ac, rate_idx;
	s64 hosttime;
	u16 tx_attr;

	desc = (struct wl1271_tx_hw_descr *) skb->data;

	/* relocate space for security header */
	if (extra) {
		/* move the 802.11 header up over the reserved IV space */
		void *framestart = skb->data + sizeof(*desc);
		u16 fc = *(u16 *)(framestart + extra);
		int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
		memmove(framestart, framestart + extra, hdrlen);
	}

	/* configure packet life time */
	getnstimeofday(&ts);
	/* >> 10 approximates a ns-to-us conversion */
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	if (wl->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (wl12xx_is_dummy_packet(wl, skb)) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different than the one set in the join
		 */
		tx_attr = ((~wl->session_counter) <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else {
		/* configure the tx attributes */
		tx_attr =
			wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;

	if (wl->bss_type != BSS_TYPE_AP_BSS) {
		/* if the packets are destined for AP (have a STA entry)
		   send them with AP rate policies, otherwise use default
		   basic rates */
		if (control->control.sta)
			rate_idx = ACX_TX_AP_FULL_RATE;
		else
			rate_idx = ACX_TX_BASIC_RATE;
	} else {
		/* AP mode: pick a rate policy based on the target link */
		if (hlid == wl->ap_global_hlid)
			rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
		else if (hlid == wl->ap_bcast_hlid)
			rate_idx = ACX_TX_AP_MODE_BCST_RATE;
		else
			rate_idx = ac;
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
	desc->reserved = 0;

	aligned_len = wl12xx_calc_packet_alignment(wl, skb->len);

	/* length/padding descriptor fields differ between wl128x and wl127x */
	if (wl->chip.id == CHIP_ID_1283_PG20) {
		desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
		desc->length = cpu_to_le16(aligned_len >> 2);

		wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
			     "tx_attr: 0x%x len: %d life: %d mem: %d",
			     desc->hlid, tx_attr,
			     le16_to_cpu(desc->length),
			     le16_to_cpu(desc->life_time),
			     desc->wl128x_mem.total_mem_blocks);
	} else {
		int pad;

		/* Store the aligned length in terms of words */
		desc->length = cpu_to_le16(aligned_len >> 2);

		/* calculate number of padding bytes */
		pad = aligned_len - skb->len;
		tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;

		wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
			     "tx_attr: 0x%x len: %d life: %d mem: %d", pad,
			     desc->hlid, tx_attr,
			     le16_to_cpu(desc->length),
			     le16_to_cpu(desc->life_time),
			     desc->wl127x_mem.total_mem_blocks);
	}

	desc->tx_attr = cpu_to_le16(tx_attr);
}
 | 380 |  | 
/* caller must hold wl->mutex */
/*
 * Prepare a single frame for transmission: handle WEP default-key
 * switching, resolve the destination link, allocate FW resources,
 * fill the TX HW descriptor and copy the frame into the aggregation
 * buffer at @buf_offset.
 *
 * Returns the number of bytes consumed in the aggregation buffer
 * (bus-aligned), or a negative error code (-EINVAL on bad skb/hlid,
 * -EAGAIN/-EBUSY propagated from wl1271_tx_allocate()).
 */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
							u32 buf_offset)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	u8 hlid;

	if (!skb)
		return -EINVAL;

	info = IEEE80211_SKB_CB(skb);

	/* TKIP frames need extra room for the IV the FW inserts */
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_TKIP_IV_SPACE;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		/* switch the FW default WEP key if this frame uses another */
		if (unlikely(is_wep && wl->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, idx);
			if (ret < 0)
				return ret;
			wl->default_key = idx;
		}
	}

	hlid = wl1271_tx_get_hlid(wl, skb);
	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);

	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, skb);
		wl1271_tx_regulate_link(wl, hlid);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned.  The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (eg. for wl128x with SDIO we align to 256).
	 */
	total_len = wl12xx_calc_packet_alignment(wl, skb->len);

	/* copy the frame and zero the padding into the aggregation buffer */
	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (wl12xx_is_dummy_packet(wl, skb))
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}
 | 452 |  | 
| Eliad Peller | af7fbb2 | 2011-09-19 13:51:42 +0300 | [diff] [blame] | 453 | u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, | 
 | 454 | 				enum ieee80211_band rate_band) | 
| Juuso Oikarinen | 830fb67 | 2009-12-11 15:41:06 +0200 | [diff] [blame] | 455 | { | 
 | 456 | 	struct ieee80211_supported_band *band; | 
 | 457 | 	u32 enabled_rates = 0; | 
 | 458 | 	int bit; | 
 | 459 |  | 
| Eliad Peller | af7fbb2 | 2011-09-19 13:51:42 +0300 | [diff] [blame] | 460 | 	band = wl->hw->wiphy->bands[rate_band]; | 
| Juuso Oikarinen | 830fb67 | 2009-12-11 15:41:06 +0200 | [diff] [blame] | 461 | 	for (bit = 0; bit < band->n_bitrates; bit++) { | 
 | 462 | 		if (rate_set & 0x1) | 
 | 463 | 			enabled_rates |= band->bitrates[bit].hw_value; | 
 | 464 | 		rate_set >>= 1; | 
 | 465 | 	} | 
 | 466 |  | 
| Shahar Levi | 1835785 | 2010-10-13 16:09:41 +0200 | [diff] [blame] | 467 | 	/* MCS rates indication are on bits 16 - 23 */ | 
 | 468 | 	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates; | 
 | 469 |  | 
 | 470 | 	for (bit = 0; bit < 8; bit++) { | 
 | 471 | 		if (rate_set & 0x1) | 
 | 472 | 			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit); | 
 | 473 | 		rate_set >>= 1; | 
 | 474 | 	} | 
| Shahar Levi | 1835785 | 2010-10-13 16:09:41 +0200 | [diff] [blame] | 475 |  | 
| Juuso Oikarinen | 830fb67 | 2009-12-11 15:41:06 +0200 | [diff] [blame] | 476 | 	return enabled_rates; | 
 | 477 | } | 
 | 478 |  | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 479 | void wl1271_handle_tx_low_watermark(struct wl1271 *wl) | 
| Ido Yariv | 2fe33e8 | 2010-10-12 14:49:12 +0200 | [diff] [blame] | 480 | { | 
 | 481 | 	unsigned long flags; | 
| Arik Nemtsov | 708bb3c | 2011-06-24 13:03:37 +0300 | [diff] [blame] | 482 | 	int i; | 
| Ido Yariv | 2fe33e8 | 2010-10-12 14:49:12 +0200 | [diff] [blame] | 483 |  | 
| Arik Nemtsov | 708bb3c | 2011-06-24 13:03:37 +0300 | [diff] [blame] | 484 | 	for (i = 0; i < NUM_TX_QUEUES; i++) { | 
 | 485 | 		if (test_bit(i, &wl->stopped_queues_map) && | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 486 | 		    wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) { | 
| Arik Nemtsov | 708bb3c | 2011-06-24 13:03:37 +0300 | [diff] [blame] | 487 | 			/* firmware buffer has space, restart queues */ | 
 | 488 | 			spin_lock_irqsave(&wl->wl_lock, flags); | 
 | 489 | 			ieee80211_wake_queue(wl->hw, | 
 | 490 | 					     wl1271_tx_get_mac80211_queue(i)); | 
 | 491 | 			clear_bit(i, &wl->stopped_queues_map); | 
 | 492 | 			spin_unlock_irqrestore(&wl->wl_lock, flags); | 
 | 493 | 		} | 
| Ido Yariv | 2fe33e8 | 2010-10-12 14:49:12 +0200 | [diff] [blame] | 494 | 	} | 
 | 495 | } | 
 | 496 |  | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 497 | static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl, | 
 | 498 | 						struct sk_buff_head *queues) | 
 | 499 | { | 
 | 500 | 	int i, q = -1, ac; | 
 | 501 | 	u32 min_pkts = 0xffffffff; | 
 | 502 |  | 
 | 503 | 	/* | 
 | 504 | 	 * Find a non-empty ac where: | 
 | 505 | 	 * 1. There are packets to transmit | 
 | 506 | 	 * 2. The FW has the least allocated blocks | 
 | 507 | 	 * | 
 | 508 | 	 * We prioritize the ACs according to VO>VI>BE>BK | 
 | 509 | 	 */ | 
 | 510 | 	for (i = 0; i < NUM_TX_QUEUES; i++) { | 
 | 511 | 		ac = wl1271_tx_get_queue(i); | 
 | 512 | 		if (!skb_queue_empty(&queues[ac]) && | 
 | 513 | 		    (wl->tx_allocated_pkts[ac] < min_pkts)) { | 
 | 514 | 			q = ac; | 
 | 515 | 			min_pkts = wl->tx_allocated_pkts[q]; | 
 | 516 | 		} | 
 | 517 | 	} | 
 | 518 |  | 
 | 519 | 	if (q == -1) | 
 | 520 | 		return NULL; | 
 | 521 |  | 
 | 522 | 	return &queues[q]; | 
 | 523 | } | 
 | 524 |  | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 525 | static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl) | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 526 | { | 
 | 527 | 	struct sk_buff *skb = NULL; | 
 | 528 | 	unsigned long flags; | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 529 | 	struct sk_buff_head *queue; | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 530 |  | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 531 | 	queue = wl1271_select_queue(wl, wl->tx_queue); | 
 | 532 | 	if (!queue) | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 533 | 		goto out; | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 534 |  | 
 | 535 | 	skb = skb_dequeue(queue); | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 536 |  | 
 | 537 | out: | 
 | 538 | 	if (skb) { | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 539 | 		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 540 | 		spin_lock_irqsave(&wl->wl_lock, flags); | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 541 | 		wl->tx_queue_count[q]--; | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 542 | 		spin_unlock_irqrestore(&wl->wl_lock, flags); | 
 | 543 | 	} | 
 | 544 |  | 
 | 545 | 	return skb; | 
 | 546 | } | 
 | 547 |  | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 548 | static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl) | 
 | 549 | { | 
 | 550 | 	struct sk_buff *skb = NULL; | 
 | 551 | 	unsigned long flags; | 
 | 552 | 	int i, h, start_hlid; | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 553 | 	struct sk_buff_head *queue; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 554 |  | 
 | 555 | 	/* start from the link after the last one */ | 
 | 556 | 	start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS; | 
 | 557 |  | 
 | 558 | 	/* dequeue according to AC, round robin on each link */ | 
 | 559 | 	for (i = 0; i < AP_MAX_LINKS; i++) { | 
 | 560 | 		h = (start_hlid + i) % AP_MAX_LINKS; | 
 | 561 |  | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 562 | 		/* only consider connected stations */ | 
 | 563 | 		if (h >= WL1271_AP_STA_HLID_START && | 
 | 564 | 		    !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map)) | 
 | 565 | 			continue; | 
 | 566 |  | 
 | 567 | 		queue = wl1271_select_queue(wl, wl->links[h].tx_queue); | 
 | 568 | 		if (!queue) | 
 | 569 | 			continue; | 
 | 570 |  | 
 | 571 | 		skb = skb_dequeue(queue); | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 572 | 		if (skb) | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 573 | 			break; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 574 | 	} | 
 | 575 |  | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 576 | 	if (skb) { | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 577 | 		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 578 | 		wl->last_tx_hlid = h; | 
 | 579 | 		spin_lock_irqsave(&wl->wl_lock, flags); | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 580 | 		wl->tx_queue_count[q]--; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 581 | 		spin_unlock_irqrestore(&wl->wl_lock, flags); | 
 | 582 | 	} else { | 
 | 583 | 		wl->last_tx_hlid = 0; | 
 | 584 | 	} | 
 | 585 |  | 
 | 586 | 	return skb; | 
 | 587 | } | 
 | 588 |  | 
 | 589 | static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) | 
 | 590 | { | 
| Ido Yariv | 990f5de | 2011-03-31 10:06:59 +0200 | [diff] [blame] | 591 | 	unsigned long flags; | 
 | 592 | 	struct sk_buff *skb = NULL; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 593 |  | 
| Ido Yariv | 990f5de | 2011-03-31 10:06:59 +0200 | [diff] [blame] | 594 | 	if (wl->bss_type == BSS_TYPE_AP_BSS) | 
 | 595 | 		skb = wl1271_ap_skb_dequeue(wl); | 
 | 596 | 	else | 
 | 597 | 		skb = wl1271_sta_skb_dequeue(wl); | 
 | 598 |  | 
 | 599 | 	if (!skb && | 
 | 600 | 	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) { | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 601 | 		int q; | 
 | 602 |  | 
| Ido Yariv | 990f5de | 2011-03-31 10:06:59 +0200 | [diff] [blame] | 603 | 		skb = wl->dummy_packet; | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 604 | 		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); | 
| Ido Yariv | 990f5de | 2011-03-31 10:06:59 +0200 | [diff] [blame] | 605 | 		spin_lock_irqsave(&wl->wl_lock, flags); | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 606 | 		wl->tx_queue_count[q]--; | 
| Ido Yariv | 990f5de | 2011-03-31 10:06:59 +0200 | [diff] [blame] | 607 | 		spin_unlock_irqrestore(&wl->wl_lock, flags); | 
 | 608 | 	} | 
 | 609 |  | 
 | 610 | 	return skb; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 611 | } | 
 | 612 |  | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 613 | static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb) | 
 | 614 | { | 
 | 615 | 	unsigned long flags; | 
 | 616 | 	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); | 
 | 617 |  | 
| Ido Yariv | 990f5de | 2011-03-31 10:06:59 +0200 | [diff] [blame] | 618 | 	if (wl12xx_is_dummy_packet(wl, skb)) { | 
 | 619 | 		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); | 
 | 620 | 	} else if (wl->bss_type == BSS_TYPE_AP_BSS) { | 
| Eliad Peller | f4df1bd | 2011-08-14 13:17:15 +0300 | [diff] [blame] | 621 | 		u8 hlid = wl1271_tx_get_hlid(wl, skb); | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 622 | 		skb_queue_head(&wl->links[hlid].tx_queue[q], skb); | 
 | 623 |  | 
 | 624 | 		/* make sure we dequeue the same packet next time */ | 
 | 625 | 		wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS; | 
 | 626 | 	} else { | 
 | 627 | 		skb_queue_head(&wl->tx_queue[q], skb); | 
 | 628 | 	} | 
 | 629 |  | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 630 | 	spin_lock_irqsave(&wl->wl_lock, flags); | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 631 | 	wl->tx_queue_count[q]++; | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 632 | 	spin_unlock_irqrestore(&wl->wl_lock, flags); | 
 | 633 | } | 
 | 634 |  | 
| Eliad Peller | 77ddaa1 | 2011-05-15 11:10:29 +0300 | [diff] [blame] | 635 | static bool wl1271_tx_is_data_present(struct sk_buff *skb) | 
 | 636 | { | 
 | 637 | 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); | 
 | 638 |  | 
 | 639 | 	return ieee80211_is_data_present(hdr->frame_control); | 
 | 640 | } | 
 | 641 |  | 
| Ido Yariv | a522550 | 2010-10-12 14:49:10 +0200 | [diff] [blame] | 642 | void wl1271_tx_work_locked(struct wl1271 *wl) | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 643 | { | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 644 | 	struct sk_buff *skb; | 
| Ido Yariv | 6c6e669 | 2010-10-12 14:49:09 +0200 | [diff] [blame] | 645 | 	u32 buf_offset = 0; | 
 | 646 | 	bool sent_packets = false; | 
| Eliad Peller | 77ddaa1 | 2011-05-15 11:10:29 +0300 | [diff] [blame] | 647 | 	bool had_data = false; | 
 | 648 | 	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 649 | 	int ret; | 
 | 650 |  | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 651 | 	if (unlikely(wl->state == WL1271_STATE_OFF)) | 
| Eliad Peller | c1b193e | 2011-03-23 22:22:15 +0200 | [diff] [blame] | 652 | 		return; | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 653 |  | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 654 | 	while ((skb = wl1271_skb_dequeue(wl))) { | 
| Eliad Peller | 77ddaa1 | 2011-05-15 11:10:29 +0300 | [diff] [blame] | 655 | 		if (wl1271_tx_is_data_present(skb)) | 
 | 656 | 			had_data = true; | 
 | 657 |  | 
| Ido Yariv | a19606b | 2010-09-30 13:28:28 +0200 | [diff] [blame] | 658 | 		ret = wl1271_prepare_tx_frame(wl, skb, buf_offset); | 
| Ido Yariv | 6c6e669 | 2010-10-12 14:49:09 +0200 | [diff] [blame] | 659 | 		if (ret == -EAGAIN) { | 
| Ido Yariv | a19606b | 2010-09-30 13:28:28 +0200 | [diff] [blame] | 660 | 			/* | 
| Ido Yariv | 6c6e669 | 2010-10-12 14:49:09 +0200 | [diff] [blame] | 661 | 			 * Aggregation buffer is full. | 
 | 662 | 			 * Flush buffer and try again. | 
 | 663 | 			 */ | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 664 | 			wl1271_skb_queue_head(wl, skb); | 
| Ido Yariv | 6c6e669 | 2010-10-12 14:49:09 +0200 | [diff] [blame] | 665 | 			wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 666 | 				     buf_offset, true); | 
| Ido Yariv | 6c6e669 | 2010-10-12 14:49:09 +0200 | [diff] [blame] | 667 | 			sent_packets = true; | 
 | 668 | 			buf_offset = 0; | 
 | 669 | 			continue; | 
 | 670 | 		} else if (ret == -EBUSY) { | 
 | 671 | 			/* | 
 | 672 | 			 * Firmware buffer is full. | 
| Ido Yariv | a19606b | 2010-09-30 13:28:28 +0200 | [diff] [blame] | 673 | 			 * Queue back last skb, and stop aggregating. | 
 | 674 | 			 */ | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 675 | 			wl1271_skb_queue_head(wl, skb); | 
| Ido Yariv | a522550 | 2010-10-12 14:49:10 +0200 | [diff] [blame] | 676 | 			/* No work left, avoid scheduling redundant tx work */ | 
 | 677 | 			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); | 
| Juuso Oikarinen | ffb591c | 2010-02-22 08:38:31 +0200 | [diff] [blame] | 678 | 			goto out_ack; | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 679 | 		} else if (ret < 0) { | 
 | 680 | 			dev_kfree_skb(skb); | 
| Juuso Oikarinen | ffb591c | 2010-02-22 08:38:31 +0200 | [diff] [blame] | 681 | 			goto out_ack; | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 682 | 		} | 
| Ido Yariv | a19606b | 2010-09-30 13:28:28 +0200 | [diff] [blame] | 683 | 		buf_offset += ret; | 
 | 684 | 		wl->tx_packets_count++; | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 685 | 	} | 
 | 686 |  | 
| Juuso Oikarinen | ffb591c | 2010-02-22 08:38:31 +0200 | [diff] [blame] | 687 | out_ack: | 
| Ido Yariv | a19606b | 2010-09-30 13:28:28 +0200 | [diff] [blame] | 688 | 	if (buf_offset) { | 
 | 689 | 		wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, | 
 | 690 | 				buf_offset, true); | 
| Ido Yariv | 6c6e669 | 2010-10-12 14:49:09 +0200 | [diff] [blame] | 691 | 		sent_packets = true; | 
 | 692 | 	} | 
 | 693 | 	if (sent_packets) { | 
| Ido Yariv | 606ea9f | 2011-03-01 15:14:39 +0200 | [diff] [blame] | 694 | 		/* | 
 | 695 | 		 * Interrupt the firmware with the new packets. This is only | 
 | 696 | 		 * required for older hardware revisions | 
 | 697 | 		 */ | 
 | 698 | 		if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) | 
 | 699 | 			wl1271_write32(wl, WL1271_HOST_WR_ACCESS, | 
 | 700 | 				       wl->tx_packets_count); | 
 | 701 |  | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 702 | 		wl1271_handle_tx_low_watermark(wl); | 
| Ido Yariv | a19606b | 2010-09-30 13:28:28 +0200 | [diff] [blame] | 703 | 	} | 
| Eliad Peller | 77ddaa1 | 2011-05-15 11:10:29 +0300 | [diff] [blame] | 704 | 	if (!is_ap && wl->conf.rx_streaming.interval && had_data && | 
 | 705 | 	    (wl->conf.rx_streaming.always || | 
 | 706 | 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) { | 
 | 707 | 		u32 timeout = wl->conf.rx_streaming.duration; | 
 | 708 |  | 
 | 709 | 		/* enable rx streaming */ | 
 | 710 | 		if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) | 
 | 711 | 			ieee80211_queue_work(wl->hw, | 
 | 712 | 					     &wl->rx_streaming_enable_work); | 
 | 713 |  | 
 | 714 | 		mod_timer(&wl->rx_streaming_timer, | 
 | 715 | 			  jiffies + msecs_to_jiffies(timeout)); | 
 | 716 | 	} | 
| Ido Yariv | a522550 | 2010-10-12 14:49:10 +0200 | [diff] [blame] | 717 | } | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 718 |  | 
| Ido Yariv | a522550 | 2010-10-12 14:49:10 +0200 | [diff] [blame] | 719 | void wl1271_tx_work(struct work_struct *work) | 
 | 720 | { | 
 | 721 | 	struct wl1271 *wl = container_of(work, struct wl1271, tx_work); | 
| Eliad Peller | c1b193e | 2011-03-23 22:22:15 +0200 | [diff] [blame] | 722 | 	int ret; | 
| Ido Yariv | a522550 | 2010-10-12 14:49:10 +0200 | [diff] [blame] | 723 |  | 
 | 724 | 	mutex_lock(&wl->mutex); | 
| Eliad Peller | c1b193e | 2011-03-23 22:22:15 +0200 | [diff] [blame] | 725 | 	ret = wl1271_ps_elp_wakeup(wl); | 
 | 726 | 	if (ret < 0) | 
 | 727 | 		goto out; | 
 | 728 |  | 
| Ido Yariv | a522550 | 2010-10-12 14:49:10 +0200 | [diff] [blame] | 729 | 	wl1271_tx_work_locked(wl); | 
| Eliad Peller | c1b193e | 2011-03-23 22:22:15 +0200 | [diff] [blame] | 730 |  | 
| Eliad Peller | c75bbcd | 2011-04-04 10:38:47 +0300 | [diff] [blame] | 731 | 	wl1271_ps_elp_sleep(wl); | 
| Eliad Peller | c1b193e | 2011-03-23 22:22:15 +0200 | [diff] [blame] | 732 | out: | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 733 | 	mutex_unlock(&wl->mutex); | 
 | 734 | } | 
 | 735 |  | 
/*
 * Process one firmware TX result: fill in the mac80211 status for the
 * corresponding skb, strip the driver/TKIP headers, and hand the skb
 * back to the stack via the deferred queue.
 */
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	/* dummy packets never reach mac80211; just release the descriptor */
	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = 0;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/*
	 * update sequence number only when relevant, i.e. only in
	 * sessions of TKIP, AES and GEM (not in open or WEP sessions)
	 */
	if (info->control.hw_key &&
	    (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
		u8 fw_lsb = result->tx_security_sequence_number_lsb;
		u8 cur_lsb = wl->tx_security_last_seq_lsb;

		/*
		 * update security sequence number, taking care of potential
		 * wrap-around
		 */
		wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;
		wl->tx_security_last_seq_lsb = fw_lsb;
	}

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
		skb_pull(skb, WL1271_TKIP_IV_SPACE);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}
 | 817 |  | 
/* Called upon reception of a TX complete interrupt */
void wl1271_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap =
		(struct wl1271_acx_mem_map *)wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;

	/* read the tx results from the chipset */
	wl1271_read(wl, le32_to_cpu(memmap->tx_result),
		    wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
		       offsetof(struct wl1271_tx_hw_res_if,
				tx_result_host_counter), fw_counter);

	/* number of new results since we last processed the queue */
	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		/* the result queue is a ring; mask to get the slot index */
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result =  &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}
}
 | 855 |  | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 856 | void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) | 
 | 857 | { | 
 | 858 | 	struct sk_buff *skb; | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 859 | 	int i; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 860 | 	unsigned long flags; | 
| Arik Nemtsov | 1d36cd8 | 2011-02-23 00:22:27 +0200 | [diff] [blame] | 861 | 	struct ieee80211_tx_info *info; | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 862 | 	int total[NUM_TX_QUEUES]; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 863 |  | 
 | 864 | 	for (i = 0; i < NUM_TX_QUEUES; i++) { | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 865 | 		total[i] = 0; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 866 | 		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) { | 
 | 867 | 			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb); | 
| Arik Nemtsov | 79ebec7 | 2011-08-14 13:17:18 +0300 | [diff] [blame] | 868 |  | 
 | 869 | 			if (!wl12xx_is_dummy_packet(wl, skb)) { | 
 | 870 | 				info = IEEE80211_SKB_CB(skb); | 
 | 871 | 				info->status.rates[0].idx = -1; | 
 | 872 | 				info->status.rates[0].count = 0; | 
 | 873 | 				ieee80211_tx_status_ni(wl->hw, skb); | 
 | 874 | 			} | 
 | 875 |  | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 876 | 			total[i]++; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 877 | 		} | 
 | 878 | 	} | 
 | 879 |  | 
 | 880 | 	spin_lock_irqsave(&wl->wl_lock, flags); | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 881 | 	for (i = 0; i < NUM_TX_QUEUES; i++) | 
 | 882 | 		wl->tx_queue_count[i] -= total[i]; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 883 | 	spin_unlock_irqrestore(&wl->wl_lock, flags); | 
 | 884 |  | 
 | 885 | 	wl1271_handle_tx_low_watermark(wl); | 
 | 886 | } | 
 | 887 |  | 
| Arik Nemtsov | 7dece1c | 2011-04-18 14:15:28 +0300 | [diff] [blame] | 888 | /* caller must hold wl->mutex and TX must be stopped */ | 
 | 889 | void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues) | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 890 | { | 
 | 891 | 	int i; | 
 | 892 | 	struct sk_buff *skb; | 
| Arik Nemtsov | 1d36cd8 | 2011-02-23 00:22:27 +0200 | [diff] [blame] | 893 | 	struct ieee80211_tx_info *info; | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 894 |  | 
 | 895 | 	/* TX failure */ | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 896 | 	if (wl->bss_type == BSS_TYPE_AP_BSS) { | 
| Arik Nemtsov | 09039f4 | 2011-02-23 00:22:30 +0200 | [diff] [blame] | 897 | 		for (i = 0; i < AP_MAX_LINKS; i++) { | 
| Arik Nemtsov | f1acea9 | 2011-08-25 12:43:17 +0300 | [diff] [blame] | 898 | 			wl1271_free_sta(wl, i); | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 899 | 			wl1271_tx_reset_link_queues(wl, i); | 
| Arik Nemtsov | 9b17f1b | 2011-08-14 13:17:35 +0300 | [diff] [blame] | 900 | 			wl->links[i].allocated_pkts = 0; | 
 | 901 | 			wl->links[i].prev_freed_pkts = 0; | 
| Arik Nemtsov | 09039f4 | 2011-02-23 00:22:30 +0200 | [diff] [blame] | 902 | 		} | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 903 |  | 
 | 904 | 		wl->last_tx_hlid = 0; | 
 | 905 | 	} else { | 
 | 906 | 		for (i = 0; i < NUM_TX_QUEUES; i++) { | 
 | 907 | 			while ((skb = skb_dequeue(&wl->tx_queue[i]))) { | 
 | 908 | 				wl1271_debug(DEBUG_TX, "freeing skb 0x%p", | 
 | 909 | 					     skb); | 
| Shahar Levi | ae47c45 | 2011-03-06 16:32:14 +0200 | [diff] [blame] | 910 |  | 
| Ido Yariv | 990f5de | 2011-03-31 10:06:59 +0200 | [diff] [blame] | 911 | 				if (!wl12xx_is_dummy_packet(wl, skb)) { | 
| Shahar Levi | ae47c45 | 2011-03-06 16:32:14 +0200 | [diff] [blame] | 912 | 					info = IEEE80211_SKB_CB(skb); | 
 | 913 | 					info->status.rates[0].idx = -1; | 
 | 914 | 					info->status.rates[0].count = 0; | 
| Eliad Peller | c27d3ac | 2011-06-07 10:40:39 +0300 | [diff] [blame] | 915 | 					ieee80211_tx_status_ni(wl->hw, skb); | 
| Shahar Levi | ae47c45 | 2011-03-06 16:32:14 +0200 | [diff] [blame] | 916 | 				} | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 917 | 			} | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 918 | 		} | 
| Arik Nemtsov | f1acea9 | 2011-08-25 12:43:17 +0300 | [diff] [blame] | 919 |  | 
 | 920 | 		wl->ba_rx_bitmap = 0; | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 921 | 	} | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 922 |  | 
| Arik Nemtsov | f1acea9 | 2011-08-25 12:43:17 +0300 | [diff] [blame] | 923 | 	for (i = 0; i < NUM_TX_QUEUES; i++) | 
 | 924 | 		wl->tx_queue_count[i] = 0; | 
 | 925 |  | 
| Arik Nemtsov | 708bb3c | 2011-06-24 13:03:37 +0300 | [diff] [blame] | 926 | 	wl->stopped_queues_map = 0; | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 927 |  | 
| Ido Yariv | 2fe33e8 | 2010-10-12 14:49:12 +0200 | [diff] [blame] | 928 | 	/* | 
 | 929 | 	 * Make sure the driver is at a consistent state, in case this | 
 | 930 | 	 * function is called from a context other than interface removal. | 
| Arik Nemtsov | 7dece1c | 2011-04-18 14:15:28 +0300 | [diff] [blame] | 931 | 	 * This call will always wake the TX queues. | 
| Ido Yariv | 2fe33e8 | 2010-10-12 14:49:12 +0200 | [diff] [blame] | 932 | 	 */ | 
| Arik Nemtsov | 7dece1c | 2011-04-18 14:15:28 +0300 | [diff] [blame] | 933 | 	if (reset_tx_queues) | 
 | 934 | 		wl1271_handle_tx_low_watermark(wl); | 
| Ido Yariv | 2fe33e8 | 2010-10-12 14:49:12 +0200 | [diff] [blame] | 935 |  | 
| Ido Yariv | 50e9f74 | 2011-02-28 00:16:13 +0200 | [diff] [blame] | 936 | 	for (i = 0; i < ACX_TX_DESCRIPTORS; i++) { | 
 | 937 | 		if (wl->tx_frames[i] == NULL) | 
 | 938 | 			continue; | 
 | 939 |  | 
 | 940 | 		skb = wl->tx_frames[i]; | 
 | 941 | 		wl1271_free_tx_id(wl, i); | 
 | 942 | 		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); | 
 | 943 |  | 
| Ido Yariv | 990f5de | 2011-03-31 10:06:59 +0200 | [diff] [blame] | 944 | 		if (!wl12xx_is_dummy_packet(wl, skb)) { | 
| Shahar Levi | ae47c45 | 2011-03-06 16:32:14 +0200 | [diff] [blame] | 945 | 			/* | 
 | 946 | 			 * Remove private headers before passing the skb to | 
 | 947 | 			 * mac80211 | 
 | 948 | 			 */ | 
 | 949 | 			info = IEEE80211_SKB_CB(skb); | 
 | 950 | 			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); | 
 | 951 | 			if (info->control.hw_key && | 
 | 952 | 			    info->control.hw_key->cipher == | 
 | 953 | 			    WLAN_CIPHER_SUITE_TKIP) { | 
 | 954 | 				int hdrlen = ieee80211_get_hdrlen_from_skb(skb); | 
 | 955 | 				memmove(skb->data + WL1271_TKIP_IV_SPACE, | 
 | 956 | 					skb->data, hdrlen); | 
 | 957 | 				skb_pull(skb, WL1271_TKIP_IV_SPACE); | 
 | 958 | 			} | 
 | 959 |  | 
 | 960 | 			info->status.rates[0].idx = -1; | 
 | 961 | 			info->status.rates[0].count = 0; | 
 | 962 |  | 
| Eliad Peller | c27d3ac | 2011-06-07 10:40:39 +0300 | [diff] [blame] | 963 | 			ieee80211_tx_status_ni(wl->hw, skb); | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 964 | 		} | 
| Ido Yariv | 50e9f74 | 2011-02-28 00:16:13 +0200 | [diff] [blame] | 965 | 	} | 
| Juuso Oikarinen | 781608c | 2010-05-24 11:18:17 +0300 | [diff] [blame] | 966 | } | 
 | 967 |  | 
 | 968 | #define WL1271_TX_FLUSH_TIMEOUT 500000 | 
 | 969 |  | 
 | 970 | /* caller must *NOT* hold wl->mutex */ | 
 | 971 | void wl1271_tx_flush(struct wl1271 *wl) | 
 | 972 | { | 
 | 973 | 	unsigned long timeout; | 
 | 974 | 	timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT); | 
 | 975 |  | 
 | 976 | 	while (!time_after(jiffies, timeout)) { | 
 | 977 | 		mutex_lock(&wl->mutex); | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 978 | 		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d", | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 979 | 			     wl->tx_frames_cnt, | 
 | 980 | 			     wl1271_tx_total_queue_count(wl)); | 
 | 981 | 		if ((wl->tx_frames_cnt == 0) && | 
 | 982 | 		    (wl1271_tx_total_queue_count(wl) == 0)) { | 
| Juuso Oikarinen | 781608c | 2010-05-24 11:18:17 +0300 | [diff] [blame] | 983 | 			mutex_unlock(&wl->mutex); | 
 | 984 | 			return; | 
 | 985 | 		} | 
 | 986 | 		mutex_unlock(&wl->mutex); | 
 | 987 | 		msleep(1); | 
 | 988 | 	} | 
 | 989 |  | 
 | 990 | 	wl1271_warning("Unable to flush all TX buffers, timed out."); | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 991 | } | 
| Arik Nemtsov | e0fe371 | 2010-10-16 18:19:53 +0200 | [diff] [blame] | 992 |  | 
| Eliad Peller | af7fbb2 | 2011-09-19 13:51:42 +0300 | [diff] [blame] | 993 | u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set) | 
| Arik Nemtsov | e0fe371 | 2010-10-16 18:19:53 +0200 | [diff] [blame] | 994 | { | 
| Eliad Peller | af7fbb2 | 2011-09-19 13:51:42 +0300 | [diff] [blame] | 995 | 	if (WARN_ON(!rate_set)) | 
 | 996 | 		return 0; | 
| Arik Nemtsov | e0fe371 | 2010-10-16 18:19:53 +0200 | [diff] [blame] | 997 |  | 
| Eliad Peller | af7fbb2 | 2011-09-19 13:51:42 +0300 | [diff] [blame] | 998 | 	return BIT(__ffs(rate_set)); | 
| Arik Nemtsov | e0fe371 | 2010-10-16 18:19:53 +0200 | [diff] [blame] | 999 | } |