| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 1 | /* | 
 | 2 |  * This file is part of wl1271 | 
 | 3 |  * | 
 | 4 |  * Copyright (C) 2009 Nokia Corporation | 
 | 5 |  * | 
 | 6 |  * Contact: Luciano Coelho <luciano.coelho@nokia.com> | 
 | 7 |  * | 
 | 8 |  * This program is free software; you can redistribute it and/or | 
 | 9 |  * modify it under the terms of the GNU General Public License | 
 | 10 |  * version 2 as published by the Free Software Foundation. | 
 | 11 |  * | 
 | 12 |  * This program is distributed in the hope that it will be useful, but | 
 | 13 |  * WITHOUT ANY WARRANTY; without even the implied warranty of | 
 | 14 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU | 
 | 15 |  * General Public License for more details. | 
 | 16 |  * | 
 | 17 |  * You should have received a copy of the GNU General Public License | 
 | 18 |  * along with this program; if not, write to the Free Software | 
 | 19 |  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | 
 | 20 |  * 02110-1301 USA | 
 | 21 |  * | 
 | 22 |  */ | 
 | 23 |  | 
 | 24 | #include <linux/kernel.h> | 
 | 25 | #include <linux/module.h> | 
| Arik Nemtsov | c6c8a65 | 2010-10-16 20:27:53 +0200 | [diff] [blame] | 26 | #include <linux/etherdevice.h> | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 27 |  | 
| Shahar Levi | 00d2010 | 2010-11-08 11:20:10 +0000 | [diff] [blame] | 28 | #include "wl12xx.h" | 
| Luciano Coelho | 0f4e312 | 2011-10-07 11:02:42 +0300 | [diff] [blame] | 29 | #include "debug.h" | 
| Shahar Levi | 00d2010 | 2010-11-08 11:20:10 +0000 | [diff] [blame] | 30 | #include "io.h" | 
 | 31 | #include "reg.h" | 
 | 32 | #include "ps.h" | 
 | 33 | #include "tx.h" | 
| Arik Nemtsov | 56d4f8f | 2011-08-25 12:43:13 +0300 | [diff] [blame] | 34 | #include "event.h" | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 35 |  | 
| Eliad Peller | 536129c | 2011-10-05 11:55:45 +0200 | [diff] [blame] | 36 | static int wl1271_set_default_wep_key(struct wl1271 *wl, | 
 | 37 | 				      struct wl12xx_vif *wlvif, u8 id) | 
| Arik Nemtsov | 7f179b4 | 2010-10-16 21:39:06 +0200 | [diff] [blame] | 38 | { | 
 | 39 | 	int ret; | 
| Eliad Peller | 536129c | 2011-10-05 11:55:45 +0200 | [diff] [blame] | 40 | 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); | 
| Arik Nemtsov | 7f179b4 | 2010-10-16 21:39:06 +0200 | [diff] [blame] | 41 |  | 
 | 42 | 	if (is_ap) | 
| Eliad Peller | c690ec8 | 2011-08-14 13:17:07 +0300 | [diff] [blame] | 43 | 		ret = wl12xx_cmd_set_default_wep_key(wl, id, | 
| Eliad Peller | a8ab39a | 2011-10-05 11:55:54 +0200 | [diff] [blame] | 44 | 						     wlvif->ap.bcast_hlid); | 
| Arik Nemtsov | 7f179b4 | 2010-10-16 21:39:06 +0200 | [diff] [blame] | 45 | 	else | 
| Eliad Peller | 154da67 | 2011-10-05 11:55:53 +0200 | [diff] [blame] | 46 | 		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid); | 
| Arik Nemtsov | 7f179b4 | 2010-10-16 21:39:06 +0200 | [diff] [blame] | 47 |  | 
 | 48 | 	if (ret < 0) | 
 | 49 | 		return ret; | 
 | 50 |  | 
 | 51 | 	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id); | 
 | 52 | 	return 0; | 
 | 53 | } | 
 | 54 |  | 
| Ido Yariv | 25eeb9e | 2010-10-12 16:20:06 +0200 | [diff] [blame] | 55 | static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb) | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 56 | { | 
| Ido Yariv | 25eeb9e | 2010-10-12 16:20:06 +0200 | [diff] [blame] | 57 | 	int id; | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 58 |  | 
| Ido Yariv | 25eeb9e | 2010-10-12 16:20:06 +0200 | [diff] [blame] | 59 | 	id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS); | 
 | 60 | 	if (id >= ACX_TX_DESCRIPTORS) | 
 | 61 | 		return -EBUSY; | 
 | 62 |  | 
 | 63 | 	__set_bit(id, wl->tx_frames_map); | 
 | 64 | 	wl->tx_frames[id] = skb; | 
 | 65 | 	wl->tx_frames_cnt++; | 
 | 66 | 	return id; | 
 | 67 | } | 
 | 68 |  | 
 | 69 | static void wl1271_free_tx_id(struct wl1271 *wl, int id) | 
 | 70 | { | 
 | 71 | 	if (__test_and_clear_bit(id, wl->tx_frames_map)) { | 
| Ido Yariv | ef2e300 | 2011-04-18 16:44:11 +0300 | [diff] [blame] | 72 | 		if (unlikely(wl->tx_frames_cnt == ACX_TX_DESCRIPTORS)) | 
 | 73 | 			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); | 
 | 74 |  | 
| Ido Yariv | 25eeb9e | 2010-10-12 16:20:06 +0200 | [diff] [blame] | 75 | 		wl->tx_frames[id] = NULL; | 
 | 76 | 		wl->tx_frames_cnt--; | 
 | 77 | 	} | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 78 | } | 
 | 79 |  | 
| Arik Nemtsov | 99a2775 | 2011-02-23 00:22:25 +0200 | [diff] [blame] | 80 | static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, | 
 | 81 | 						 struct sk_buff *skb) | 
 | 82 | { | 
 | 83 | 	struct ieee80211_hdr *hdr; | 
 | 84 |  | 
 | 85 | 	/* | 
 | 86 | 	 * add the station to the known list before transmitting the | 
 | 87 | 	 * authentication response. this way it won't get de-authed by FW | 
 | 88 | 	 * when transmitting too soon. | 
 | 89 | 	 */ | 
 | 90 | 	hdr = (struct ieee80211_hdr *)(skb->data + | 
 | 91 | 				       sizeof(struct wl1271_tx_hw_descr)); | 
 | 92 | 	if (ieee80211_is_auth(hdr->frame_control)) | 
 | 93 | 		wl1271_acx_set_inconnection_sta(wl, hdr->addr1); | 
 | 94 | } | 
 | 95 |  | 
| Eliad Peller | c7ffb90 | 2011-10-05 11:56:05 +0200 | [diff] [blame] | 96 | static void wl1271_tx_regulate_link(struct wl1271 *wl, | 
 | 97 | 				    struct wl12xx_vif *wlvif, | 
 | 98 | 				    u8 hlid) | 
| Arik Nemtsov | b622d99 | 2011-02-23 00:22:31 +0200 | [diff] [blame] | 99 | { | 
| Arik Nemtsov | da03209 | 2011-08-25 12:43:15 +0300 | [diff] [blame] | 100 | 	bool fw_ps, single_sta; | 
| Arik Nemtsov | 9b17f1b | 2011-08-14 13:17:35 +0300 | [diff] [blame] | 101 | 	u8 tx_pkts; | 
| Arik Nemtsov | b622d99 | 2011-02-23 00:22:31 +0200 | [diff] [blame] | 102 |  | 
| Eliad Peller | c7ffb90 | 2011-10-05 11:56:05 +0200 | [diff] [blame] | 103 | 	if (WARN_ON(!test_bit(hlid, wlvif->links_map))) | 
| Arik Nemtsov | b622d99 | 2011-02-23 00:22:31 +0200 | [diff] [blame] | 104 | 		return; | 
 | 105 |  | 
 | 106 | 	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); | 
| Arik Nemtsov | 9b17f1b | 2011-08-14 13:17:35 +0300 | [diff] [blame] | 107 | 	tx_pkts = wl->links[hlid].allocated_pkts; | 
| Arik Nemtsov | da03209 | 2011-08-25 12:43:15 +0300 | [diff] [blame] | 108 | 	single_sta = (wl->active_sta_count == 1); | 
| Arik Nemtsov | b622d99 | 2011-02-23 00:22:31 +0200 | [diff] [blame] | 109 |  | 
 | 110 | 	/* | 
 | 111 | 	 * if in FW PS and there is enough data in FW we can put the link | 
 | 112 | 	 * into high-level PS and clean out its TX queues. | 
| Arik Nemtsov | da03209 | 2011-08-25 12:43:15 +0300 | [diff] [blame] | 113 | 	 * Make an exception if this is the only connected station. In this | 
 | 114 | 	 * case FW-memory congestion is not a problem. | 
| Arik Nemtsov | b622d99 | 2011-02-23 00:22:31 +0200 | [diff] [blame] | 115 | 	 */ | 
| Arik Nemtsov | da03209 | 2011-08-25 12:43:15 +0300 | [diff] [blame] | 116 | 	if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) | 
| Eliad Peller | 6e8cd33 | 2011-10-10 10:13:13 +0200 | [diff] [blame] | 117 | 		wl12xx_ps_link_start(wl, wlvif, hlid, true); | 
| Arik Nemtsov | b622d99 | 2011-02-23 00:22:31 +0200 | [diff] [blame] | 118 | } | 
 | 119 |  | 
| Arik Nemtsov | f8e0af6 | 2011-08-25 12:43:12 +0300 | [diff] [blame] | 120 | bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) | 
| Eliad Peller | f4df1bd | 2011-08-14 13:17:15 +0300 | [diff] [blame] | 121 | { | 
 | 122 | 	return wl->dummy_packet == skb; | 
 | 123 | } | 
 | 124 |  | 
| Eliad Peller | a8ab39a | 2011-10-05 11:55:54 +0200 | [diff] [blame] | 125 | u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, | 
 | 126 | 			 struct sk_buff *skb) | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 127 | { | 
 | 128 | 	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb); | 
 | 129 |  | 
 | 130 | 	if (control->control.sta) { | 
 | 131 | 		struct wl1271_station *wl_sta; | 
 | 132 |  | 
 | 133 | 		wl_sta = (struct wl1271_station *) | 
 | 134 | 				control->control.sta->drv_priv; | 
 | 135 | 		return wl_sta->hlid; | 
 | 136 | 	} else { | 
 | 137 | 		struct ieee80211_hdr *hdr; | 
 | 138 |  | 
| Eliad Peller | 53d40d0 | 2011-10-10 10:13:02 +0200 | [diff] [blame] | 139 | 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) | 
| Eliad Peller | f4df1bd | 2011-08-14 13:17:15 +0300 | [diff] [blame] | 140 | 			return wl->system_hlid; | 
 | 141 |  | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 142 | 		hdr = (struct ieee80211_hdr *)skb->data; | 
 | 143 | 		if (ieee80211_is_mgmt(hdr->frame_control)) | 
| Eliad Peller | a8ab39a | 2011-10-05 11:55:54 +0200 | [diff] [blame] | 144 | 			return wlvif->ap.global_hlid; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 145 | 		else | 
| Eliad Peller | a8ab39a | 2011-10-05 11:55:54 +0200 | [diff] [blame] | 146 | 			return wlvif->ap.bcast_hlid; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 147 | 	} | 
 | 148 | } | 
 | 149 |  | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 150 | u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, | 
 | 151 | 		      struct sk_buff *skb) | 
| Eliad Peller | f4df1bd | 2011-08-14 13:17:15 +0300 | [diff] [blame] | 152 | { | 
| Eliad Peller | df4c849 | 2011-09-15 16:05:47 +0300 | [diff] [blame] | 153 | 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 
 | 154 |  | 
| Eliad Peller | 0f16801 | 2011-10-11 13:52:25 +0200 | [diff] [blame] | 155 | 	if (!wlvif || wl12xx_is_dummy_packet(wl, skb)) | 
| Eliad Peller | f4df1bd | 2011-08-14 13:17:15 +0300 | [diff] [blame] | 156 | 		return wl->system_hlid; | 
 | 157 |  | 
| Eliad Peller | 536129c | 2011-10-05 11:55:45 +0200 | [diff] [blame] | 158 | 	if (wlvif->bss_type == BSS_TYPE_AP_BSS) | 
| Eliad Peller | a8ab39a | 2011-10-05 11:55:54 +0200 | [diff] [blame] | 159 | 		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb); | 
| Eliad Peller | f4df1bd | 2011-08-14 13:17:15 +0300 | [diff] [blame] | 160 |  | 
| Eliad Peller | ba8447f | 2011-10-10 10:13:00 +0200 | [diff] [blame] | 161 | 	if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || | 
| Eliad Peller | eee514e | 2011-10-10 10:13:01 +0200 | [diff] [blame] | 162 | 	     test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) && | 
| Eliad Peller | df4c849 | 2011-09-15 16:05:47 +0300 | [diff] [blame] | 163 | 	    !ieee80211_is_auth(hdr->frame_control) && | 
 | 164 | 	    !ieee80211_is_assoc_req(hdr->frame_control)) | 
| Eliad Peller | 154da67 | 2011-10-05 11:55:53 +0200 | [diff] [blame] | 165 | 		return wlvif->sta.hlid; | 
| Eliad Peller | f4df1bd | 2011-08-14 13:17:15 +0300 | [diff] [blame] | 166 | 	else | 
| Eliad Peller | afaf8bd | 2011-10-05 11:55:57 +0200 | [diff] [blame] | 167 | 		return wlvif->dev_hlid; | 
| Eliad Peller | f4df1bd | 2011-08-14 13:17:15 +0300 | [diff] [blame] | 168 | } | 
 | 169 |  | 
| Ido Yariv | 0da13da | 2011-03-31 10:06:58 +0200 | [diff] [blame] | 170 | static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl, | 
 | 171 | 						unsigned int packet_length) | 
 | 172 | { | 
| Luciano Coelho | ce39def | 2011-11-03 08:44:41 +0200 | [diff] [blame] | 173 | 	if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT) | 
| Ido Yariv | 0da13da | 2011-03-31 10:06:58 +0200 | [diff] [blame] | 174 | 		return ALIGN(packet_length, WL1271_TX_ALIGN_TO); | 
| Luciano Coelho | ce39def | 2011-11-03 08:44:41 +0200 | [diff] [blame] | 175 | 	else | 
 | 176 | 		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); | 
| Ido Yariv | 0da13da | 2011-03-31 10:06:58 +0200 | [diff] [blame] | 177 | } | 
 | 178 |  | 
/*
 * Reserve firmware resources for one frame: a TX descriptor id plus
 * enough FW memory blocks for the aligned frame and its spare blocks.
 *
 * On success, skb has been skb_push()ed to make room for the HW
 * descriptor and the partially-filled descriptor sits at skb->data;
 * wl1271_tx_fill_hdr() completes it. Returns 0 on success, -EAGAIN if
 * the frame won't fit in the aggregation buffer at @buf_offset (caller
 * should flush and retry), -EBUSY if no descriptor id or not enough FW
 * blocks are available.
 */
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid)
{
	struct wl1271_tx_hw_descr *desc;
	/* payload + HW descriptor + security-header space */
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 len;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks = wl->tx_spare_blocks;
	bool is_dummy = false;

	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
		return -EAGAIN;

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	/* approximate the number of blocks required for this packet
	   in the firmware */
	len = wl12xx_calc_packet_alignment(wl, total_len);

	/* in case of a dummy packet, use default amount of spare mem blocks */
	if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
		is_dummy = true;
		spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
	}

	/* round up to whole blocks, then add the spares */
	total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
		spare_blocks;

	if (total_blocks <= wl->tx_blocks_available) {
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		/* HW descriptor fields change between wl127x and wl128x */
		if (wl->chip.id == CHIP_ID_1283_PG20) {
			desc->wl128x_mem.total_mem_blocks = total_blocks;
		} else {
			desc->wl127x_mem.extra_blocks = spare_blocks;
			desc->wl127x_mem.total_mem_blocks = total_blocks;
		}

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/* If the FW was empty before, arm the Tx watchdog */
		if (wl->tx_allocated_blocks == total_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);

		/* per-AC accounting, used for queue scheduling elsewhere */
		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		/* track per-station FW occupancy for AP-mode PS regulation */
		if (!is_dummy && wlvif &&
		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    test_bit(hlid, wlvif->ap.sta_hlid_map))
			wl->links[hlid].allocated_pkts++;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		/* not enough FW blocks: roll back the descriptor id */
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}
 | 252 |  | 
/*
 * Fill in the HW TX descriptor reserved by wl1271_tx_allocate().
 *
 * Sets the timestamp, lifetime, TID, link id, session counter, rate
 * policy index and (chip-dependent) length/padding fields. When @extra
 * is non-zero the 802.11 header is moved forward to open a gap for the
 * security header the FW will insert.
 */
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int aligned_len, ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;

	/* descriptor was prepended by wl1271_tx_allocate(); the 802.11
	 * frame starts right after it, offset by the security gap */
	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
	}

	/* configure packet life time */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);	/* ns >> 10 ~= microseconds */
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	/* AP data frames get the longer AP-mode lifetime */
	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (is_dummy) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different than the one set in the join
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		/* configure the tx attributes */
		tx_attr = wlvif->session_counter <<
			  TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/* if the packets are destined for AP (have a STA entry)
		   send them with AP rate policies, otherwise use default
		   basic rates */
		if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (control->control.sta)
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		/* AP mode: pick policy by destination link */
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid)
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
	desc->reserved = 0;

	aligned_len = wl12xx_calc_packet_alignment(wl, skb->len);

	/* length/padding encoding differs between wl128x and wl127x */
	if (wl->chip.id == CHIP_ID_1283_PG20) {
		desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
		desc->length = cpu_to_le16(aligned_len >> 2);

		wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
			     "tx_attr: 0x%x len: %d life: %d mem: %d",
			     desc->hlid, tx_attr,
			     le16_to_cpu(desc->length),
			     le16_to_cpu(desc->life_time),
			     desc->wl128x_mem.total_mem_blocks);
	} else {
		int pad;

		/* Store the aligned length in terms of words */
		desc->length = cpu_to_le16(aligned_len >> 2);

		/* calculate number of padding bytes */
		pad = aligned_len - skb->len;
		tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;

		wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
			     "tx_attr: 0x%x len: %d life: %d mem: %d", pad,
			     desc->hlid, tx_attr,
			     le16_to_cpu(desc->length),
			     le16_to_cpu(desc->life_time),
			     desc->wl127x_mem.total_mem_blocks);
	}

	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	desc->tx_attr = cpu_to_le16(tx_attr);
}
 | 371 |  | 
/* caller must hold wl->mutex */
/*
 * Prepare one frame for transmission and copy it into the aggregation
 * buffer at @buf_offset: sync the default WEP key if needed, resolve
 * the destination link, allocate FW resources, fill the HW descriptor,
 * and zero-pad up to the alignment granule.
 *
 * Returns the (aligned) number of bytes consumed in the aggregation
 * buffer, or a negative error (-EINVAL for a missing skb or invalid
 * link, -EAGAIN/-EBUSY propagated from wl1271_tx_allocate()).
 */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	u8 hlid;
	bool is_dummy;

	if (!skb)
		return -EINVAL;

	info = IEEE80211_SKB_CB(skb);

	/* TODO: handle dummy packets on multi-vifs */
	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	/* TKIP needs extra header space for the FW-inserted security data */
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		/* keep the FW's default WEP key in sync with mac80211's */
		if (unlikely(is_wep && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}
	}
	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned.  The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (eg. for wl128x with SDIO we align to 256).
	 */
	total_len = wl12xx_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}
 | 446 |  | 
| Eliad Peller | af7fbb2 | 2011-09-19 13:51:42 +0300 | [diff] [blame] | 447 | u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, | 
 | 448 | 				enum ieee80211_band rate_band) | 
| Juuso Oikarinen | 830fb67 | 2009-12-11 15:41:06 +0200 | [diff] [blame] | 449 | { | 
 | 450 | 	struct ieee80211_supported_band *band; | 
 | 451 | 	u32 enabled_rates = 0; | 
 | 452 | 	int bit; | 
 | 453 |  | 
| Eliad Peller | af7fbb2 | 2011-09-19 13:51:42 +0300 | [diff] [blame] | 454 | 	band = wl->hw->wiphy->bands[rate_band]; | 
| Juuso Oikarinen | 830fb67 | 2009-12-11 15:41:06 +0200 | [diff] [blame] | 455 | 	for (bit = 0; bit < band->n_bitrates; bit++) { | 
 | 456 | 		if (rate_set & 0x1) | 
 | 457 | 			enabled_rates |= band->bitrates[bit].hw_value; | 
 | 458 | 		rate_set >>= 1; | 
 | 459 | 	} | 
 | 460 |  | 
| Shahar Levi | 1835785 | 2010-10-13 16:09:41 +0200 | [diff] [blame] | 461 | 	/* MCS rates indication are on bits 16 - 23 */ | 
 | 462 | 	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates; | 
 | 463 |  | 
 | 464 | 	for (bit = 0; bit < 8; bit++) { | 
 | 465 | 		if (rate_set & 0x1) | 
 | 466 | 			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit); | 
 | 467 | 		rate_set >>= 1; | 
 | 468 | 	} | 
| Shahar Levi | 1835785 | 2010-10-13 16:09:41 +0200 | [diff] [blame] | 469 |  | 
| Juuso Oikarinen | 830fb67 | 2009-12-11 15:41:06 +0200 | [diff] [blame] | 470 | 	return enabled_rates; | 
 | 471 | } | 
 | 472 |  | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 473 | void wl1271_handle_tx_low_watermark(struct wl1271 *wl) | 
| Ido Yariv | 2fe33e8 | 2010-10-12 14:49:12 +0200 | [diff] [blame] | 474 | { | 
 | 475 | 	unsigned long flags; | 
| Arik Nemtsov | 708bb3c | 2011-06-24 13:03:37 +0300 | [diff] [blame] | 476 | 	int i; | 
| Ido Yariv | 2fe33e8 | 2010-10-12 14:49:12 +0200 | [diff] [blame] | 477 |  | 
| Arik Nemtsov | 708bb3c | 2011-06-24 13:03:37 +0300 | [diff] [blame] | 478 | 	for (i = 0; i < NUM_TX_QUEUES; i++) { | 
 | 479 | 		if (test_bit(i, &wl->stopped_queues_map) && | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 480 | 		    wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) { | 
| Arik Nemtsov | 708bb3c | 2011-06-24 13:03:37 +0300 | [diff] [blame] | 481 | 			/* firmware buffer has space, restart queues */ | 
 | 482 | 			spin_lock_irqsave(&wl->wl_lock, flags); | 
 | 483 | 			ieee80211_wake_queue(wl->hw, | 
 | 484 | 					     wl1271_tx_get_mac80211_queue(i)); | 
 | 485 | 			clear_bit(i, &wl->stopped_queues_map); | 
 | 486 | 			spin_unlock_irqrestore(&wl->wl_lock, flags); | 
 | 487 | 		} | 
| Ido Yariv | 2fe33e8 | 2010-10-12 14:49:12 +0200 | [diff] [blame] | 488 | 	} | 
 | 489 | } | 
 | 490 |  | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 491 | static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl, | 
 | 492 | 						struct sk_buff_head *queues) | 
 | 493 | { | 
 | 494 | 	int i, q = -1, ac; | 
 | 495 | 	u32 min_pkts = 0xffffffff; | 
 | 496 |  | 
 | 497 | 	/* | 
 | 498 | 	 * Find a non-empty ac where: | 
 | 499 | 	 * 1. There are packets to transmit | 
 | 500 | 	 * 2. The FW has the least allocated blocks | 
 | 501 | 	 * | 
 | 502 | 	 * We prioritize the ACs according to VO>VI>BE>BK | 
 | 503 | 	 */ | 
 | 504 | 	for (i = 0; i < NUM_TX_QUEUES; i++) { | 
 | 505 | 		ac = wl1271_tx_get_queue(i); | 
 | 506 | 		if (!skb_queue_empty(&queues[ac]) && | 
 | 507 | 		    (wl->tx_allocated_pkts[ac] < min_pkts)) { | 
 | 508 | 			q = ac; | 
 | 509 | 			min_pkts = wl->tx_allocated_pkts[q]; | 
 | 510 | 		} | 
 | 511 | 	} | 
 | 512 |  | 
 | 513 | 	if (q == -1) | 
 | 514 | 		return NULL; | 
 | 515 |  | 
 | 516 | 	return &queues[q]; | 
 | 517 | } | 
 | 518 |  | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 519 | static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl, | 
 | 520 | 					      struct wl1271_link *lnk) | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 521 | { | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 522 | 	struct sk_buff *skb; | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 523 | 	unsigned long flags; | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 524 | 	struct sk_buff_head *queue; | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 525 |  | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 526 | 	queue = wl1271_select_queue(wl, lnk->tx_queue); | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 527 | 	if (!queue) | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 528 | 		return NULL; | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 529 |  | 
 | 530 | 	skb = skb_dequeue(queue); | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 531 | 	if (skb) { | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 532 | 		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 533 | 		spin_lock_irqsave(&wl->wl_lock, flags); | 
| Arik Nemtsov | 6246ca0 | 2012-02-28 00:41:30 +0200 | [diff] [blame] | 534 | 		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 535 | 		wl->tx_queue_count[q]--; | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 536 | 		spin_unlock_irqrestore(&wl->wl_lock, flags); | 
 | 537 | 	} | 
 | 538 |  | 
 | 539 | 	return skb; | 
 | 540 | } | 
 | 541 |  | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 542 | static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl, | 
 | 543 | 					      struct wl12xx_vif *wlvif) | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 544 | { | 
 | 545 | 	struct sk_buff *skb = NULL; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 546 | 	int i, h, start_hlid; | 
 | 547 |  | 
 | 548 | 	/* start from the link after the last one */ | 
| Eliad Peller | 4438aca | 2011-10-10 10:12:50 +0200 | [diff] [blame] | 549 | 	start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 550 |  | 
 | 551 | 	/* dequeue according to AC, round robin on each link */ | 
| Eliad Peller | c7ffb90 | 2011-10-05 11:56:05 +0200 | [diff] [blame] | 552 | 	for (i = 0; i < WL12XX_MAX_LINKS; i++) { | 
 | 553 | 		h = (start_hlid + i) % WL12XX_MAX_LINKS; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 554 |  | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 555 | 		/* only consider connected stations */ | 
| Eliad Peller | c7ffb90 | 2011-10-05 11:56:05 +0200 | [diff] [blame] | 556 | 		if (!test_bit(h, wlvif->links_map)) | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 557 | 			continue; | 
 | 558 |  | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 559 | 		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]); | 
 | 560 | 		if (!skb) | 
| Arik Nemtsov | 742246f | 2011-08-14 13:17:33 +0300 | [diff] [blame] | 561 | 			continue; | 
 | 562 |  | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 563 | 		wlvif->last_tx_hlid = h; | 
 | 564 | 		break; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 565 | 	} | 
 | 566 |  | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 567 | 	if (!skb) | 
| Eliad Peller | 4438aca | 2011-10-10 10:12:50 +0200 | [diff] [blame] | 568 | 		wlvif->last_tx_hlid = 0; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 569 |  | 
 | 570 | 	return skb; | 
 | 571 | } | 
 | 572 |  | 
/*
 * Pick the next skb to transmit, fairly across all vifs and links.
 *
 * Order of preference:
 *   1. continue the vif round robin from wl->last_wlvif
 *   2. the system HLID link
 *   3. a fresh pass over the vif list from its head
 *   4. the pending dummy packet, if the DUMMY_PACKET_PENDING flag is set
 *
 * Returns NULL when nothing is queued anywhere.
 */
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;

	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}
		}
	}

	/* dequeue from the system HLID before the restarting wlvif list */
	if (!skb)
		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);

	/* do a new pass over the wlvif list */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}

			/*
			 * No need to continue after last_wlvif. The previous
			 * pass should have found it.
			 */
			if (wlvif == wl->last_wlvif)
				break;
		}
	}

	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		/*
		 * The dummy packet is tracked via the flag rather than a
		 * link queue; undo the per-AC count that
		 * wl1271_skb_queue_head() added for it.
		 */
		skb = wl->dummy_packet;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}
 | 626 |  | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 627 | static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, | 
| Eliad Peller | 536129c | 2011-10-05 11:55:45 +0200 | [diff] [blame] | 628 | 				  struct sk_buff *skb) | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 629 | { | 
 | 630 | 	unsigned long flags; | 
 | 631 | 	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); | 
 | 632 |  | 
| Ido Yariv | 990f5de | 2011-03-31 10:06:59 +0200 | [diff] [blame] | 633 | 	if (wl12xx_is_dummy_packet(wl, skb)) { | 
 | 634 | 		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 635 | 	} else { | 
 | 636 | 		u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 637 | 		skb_queue_head(&wl->links[hlid].tx_queue[q], skb); | 
 | 638 |  | 
 | 639 | 		/* make sure we dequeue the same packet next time */ | 
| Eliad Peller | 4438aca | 2011-10-10 10:12:50 +0200 | [diff] [blame] | 640 | 		wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) % | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 641 | 				      WL12XX_MAX_LINKS; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 642 | 	} | 
 | 643 |  | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 644 | 	spin_lock_irqsave(&wl->wl_lock, flags); | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 645 | 	wl->tx_queue_count[q]++; | 
| Juuso Oikarinen | 6742f55 | 2010-12-13 09:52:37 +0200 | [diff] [blame] | 646 | 	spin_unlock_irqrestore(&wl->wl_lock, flags); | 
 | 647 | } | 
 | 648 |  | 
| Eliad Peller | 77ddaa1 | 2011-05-15 11:10:29 +0300 | [diff] [blame] | 649 | static bool wl1271_tx_is_data_present(struct sk_buff *skb) | 
 | 650 | { | 
 | 651 | 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); | 
 | 652 |  | 
 | 653 | 	return ieee80211_is_data_present(hdr->frame_control); | 
 | 654 | } | 
 | 655 |  | 
| Eliad Peller | 9eb599e | 2011-10-10 10:12:59 +0200 | [diff] [blame] | 656 | void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids) | 
 | 657 | { | 
 | 658 | 	struct wl12xx_vif *wlvif; | 
 | 659 | 	u32 timeout; | 
 | 660 | 	u8 hlid; | 
 | 661 |  | 
 | 662 | 	if (!wl->conf.rx_streaming.interval) | 
 | 663 | 		return; | 
 | 664 |  | 
 | 665 | 	if (!wl->conf.rx_streaming.always && | 
 | 666 | 	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)) | 
 | 667 | 		return; | 
 | 668 |  | 
 | 669 | 	timeout = wl->conf.rx_streaming.duration; | 
 | 670 | 	wl12xx_for_each_wlvif_sta(wl, wlvif) { | 
 | 671 | 		bool found = false; | 
 | 672 | 		for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) { | 
 | 673 | 			if (test_bit(hlid, wlvif->links_map)) { | 
 | 674 | 				found  = true; | 
 | 675 | 				break; | 
 | 676 | 			} | 
 | 677 | 		} | 
 | 678 |  | 
 | 679 | 		if (!found) | 
 | 680 | 			continue; | 
 | 681 |  | 
 | 682 | 		/* enable rx streaming */ | 
| Eliad Peller | 0744bdb | 2011-10-10 10:13:05 +0200 | [diff] [blame] | 683 | 		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) | 
| Eliad Peller | 9eb599e | 2011-10-10 10:12:59 +0200 | [diff] [blame] | 684 | 			ieee80211_queue_work(wl->hw, | 
 | 685 | 					     &wlvif->rx_streaming_enable_work); | 
 | 686 |  | 
 | 687 | 		mod_timer(&wlvif->rx_streaming_timer, | 
 | 688 | 			  jiffies + msecs_to_jiffies(timeout)); | 
 | 689 | 	} | 
 | 690 | } | 
 | 691 |  | 
/*
 * Drain the TX queues into the aggregation buffer and push the result
 * to the firmware.  The caller (wl1271_tx_work) holds wl->mutex and
 * has already woken the chip from ELP.
 */
void wl1271_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
	int ret;

	if (unlikely(wl->state == WL1271_STATE_OFF))
		return;

	while ((skb = wl1271_skb_dequeue(wl))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		/* the dummy packet carries no vif */
		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
			wlvif = wl12xx_vif_to_data(info->control.vif);

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb);
			wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
				     buf_offset, true);
			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		buf_offset += ret;
		wl->tx_packets_count++;
		if (has_data) {
			/* remember which links carried data this round */
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}

out_ack:
	/* flush whatever is left in the aggregation buffer */
	if (buf_offset) {
		wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
				buf_offset, true);
		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
			wl1271_write32(wl, WL1271_HOST_WR_ACCESS,
				       wl->tx_packets_count);

		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);
}
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 773 |  | 
| Ido Yariv | a522550 | 2010-10-12 14:49:10 +0200 | [diff] [blame] | 774 | void wl1271_tx_work(struct work_struct *work) | 
 | 775 | { | 
 | 776 | 	struct wl1271 *wl = container_of(work, struct wl1271, tx_work); | 
| Eliad Peller | c1b193e | 2011-03-23 22:22:15 +0200 | [diff] [blame] | 777 | 	int ret; | 
| Ido Yariv | a522550 | 2010-10-12 14:49:10 +0200 | [diff] [blame] | 778 |  | 
 | 779 | 	mutex_lock(&wl->mutex); | 
| Eliad Peller | c1b193e | 2011-03-23 22:22:15 +0200 | [diff] [blame] | 780 | 	ret = wl1271_ps_elp_wakeup(wl); | 
 | 781 | 	if (ret < 0) | 
 | 782 | 		goto out; | 
 | 783 |  | 
| Eliad Peller | a32d0cd | 2011-10-10 10:12:55 +0200 | [diff] [blame] | 784 | 	wl1271_tx_work_locked(wl); | 
| Eliad Peller | c1b193e | 2011-03-23 22:22:15 +0200 | [diff] [blame] | 785 |  | 
| Eliad Peller | c75bbcd | 2011-04-04 10:38:47 +0300 | [diff] [blame] | 786 | 	wl1271_ps_elp_sleep(wl); | 
| Eliad Peller | c1b193e | 2011-03-23 22:22:15 +0200 | [diff] [blame] | 787 | out: | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 788 | 	mutex_unlock(&wl->mutex); | 
 | 789 | } | 
 | 790 |  | 
| Pontus Fuchs | d2e2d76 | 2012-01-31 17:54:40 +0200 | [diff] [blame] | 791 | static u8 wl1271_tx_get_rate_flags(u8 rate_class_index) | 
 | 792 | { | 
| Pontus Fuchs | defe02c | 2012-01-31 17:54:41 +0200 | [diff] [blame] | 793 | 	u8 flags = 0; | 
 | 794 |  | 
| Pontus Fuchs | d2e2d76 | 2012-01-31 17:54:40 +0200 | [diff] [blame] | 795 | 	if (rate_class_index >= CONF_HW_RXTX_RATE_MCS_MIN && | 
 | 796 | 	    rate_class_index <= CONF_HW_RXTX_RATE_MCS_MAX) | 
| Pontus Fuchs | defe02c | 2012-01-31 17:54:41 +0200 | [diff] [blame] | 797 | 		flags |= IEEE80211_TX_RC_MCS; | 
 | 798 | 	if (rate_class_index == CONF_HW_RXTX_RATE_MCS7_SGI) | 
 | 799 | 		flags |= IEEE80211_TX_RC_SHORT_GI; | 
 | 800 | 	return flags; | 
| Pontus Fuchs | d2e2d76 | 2012-01-31 17:54:40 +0200 | [diff] [blame] | 801 | } | 
 | 802 |  | 
/*
 * Process one TX result descriptor from the FW: fill in the mac80211
 * status for the matching skb, undo the driver's private header
 * changes and queue the skb for deferred release to the stack.
 */
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	/* the dummy packet never goes back to mac80211; just free its id */
	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wl1271_rate_to_idx(result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/*
	 * update sequence number only when relevant, i.e. only in
	 * sessions of TKIP, AES and GEM (not in open or WEP sessions)
	 */
	if (info->control.hw_key &&
	    (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
		u8 fw_lsb = result->tx_security_sequence_number_lsb;
		u8 cur_lsb = wlvif->tx_security_last_seq_lsb;

		/*
		 * update security sequence number, taking care of potential
		 * wrap-around
		 */
		wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
		wlvif->tx_security_last_seq_lsb = fw_lsb;
	}

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		/* slide the 802.11 header back over the extra TKIP space */
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}
 | 894 |  | 
/*
 * Called upon reception of a TX complete interrupt.
 * Reads the FW result queue, acks it by writing the host counter back,
 * then processes every new result descriptor in order.
 */
void wl1271_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap =
		(struct wl1271_acx_mem_map *)wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;

	/* read the tx results from the chipset */
	wl1271_read(wl, le32_to_cpu(memmap->tx_result),
		    wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
		       offsetof(struct wl1271_tx_hw_res_if,
				tx_result_host_counter), fw_counter);

	/* number of results not yet processed by the host */
	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		/* the FW result queue is treated as a ring buffer */
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result =  &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}
}
 | 932 |  | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 933 | void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) | 
 | 934 | { | 
 | 935 | 	struct sk_buff *skb; | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 936 | 	int i; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 937 | 	unsigned long flags; | 
| Arik Nemtsov | 1d36cd8 | 2011-02-23 00:22:27 +0200 | [diff] [blame] | 938 | 	struct ieee80211_tx_info *info; | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 939 | 	int total[NUM_TX_QUEUES]; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 940 |  | 
 | 941 | 	for (i = 0; i < NUM_TX_QUEUES; i++) { | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 942 | 		total[i] = 0; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 943 | 		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) { | 
 | 944 | 			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb); | 
| Arik Nemtsov | 79ebec7 | 2011-08-14 13:17:18 +0300 | [diff] [blame] | 945 |  | 
 | 946 | 			if (!wl12xx_is_dummy_packet(wl, skb)) { | 
 | 947 | 				info = IEEE80211_SKB_CB(skb); | 
 | 948 | 				info->status.rates[0].idx = -1; | 
 | 949 | 				info->status.rates[0].count = 0; | 
 | 950 | 				ieee80211_tx_status_ni(wl->hw, skb); | 
 | 951 | 			} | 
 | 952 |  | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 953 | 			total[i]++; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 954 | 		} | 
 | 955 | 	} | 
 | 956 |  | 
 | 957 | 	spin_lock_irqsave(&wl->wl_lock, flags); | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 958 | 	for (i = 0; i < NUM_TX_QUEUES; i++) | 
 | 959 | 		wl->tx_queue_count[i] -= total[i]; | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 960 | 	spin_unlock_irqrestore(&wl->wl_lock, flags); | 
 | 961 |  | 
 | 962 | 	wl1271_handle_tx_low_watermark(wl); | 
 | 963 | } | 
 | 964 |  | 
| Arik Nemtsov | 7dece1c | 2011-04-18 14:15:28 +0300 | [diff] [blame] | 965 | /* caller must hold wl->mutex and TX must be stopped */ | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 966 | void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif) | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 967 | { | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 968 | 	int i; | 
 | 969 |  | 
 | 970 | 	/* TX failure */ | 
 | 971 | 	for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) { | 
 | 972 | 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) | 
 | 973 | 			wl1271_free_sta(wl, wlvif, i); | 
 | 974 | 		else | 
 | 975 | 			wlvif->sta.ba_rx_bitmap = 0; | 
 | 976 |  | 
| Eliad Peller | d6a3cc2 | 2011-10-10 10:12:51 +0200 | [diff] [blame] | 977 | 		wl->links[i].allocated_pkts = 0; | 
 | 978 | 		wl->links[i].prev_freed_pkts = 0; | 
 | 979 | 	} | 
 | 980 | 	wlvif->last_tx_hlid = 0; | 
 | 981 |  | 
 | 982 | } | 
 | 983 | /* caller must hold wl->mutex and TX must be stopped */ | 
void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
		for (i = 0; i < WL12XX_MAX_LINKS; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	/* no queues are flagged as stopped anymore */
	wl->stopped_queues_map = 0;

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	if (reset_tx_queues)
		wl1271_handle_tx_low_watermark(wl);

	/* release every frame still held in a FW TX descriptor slot */
	for (i = 0; i < ACX_TX_DESCRIPTORS; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			/* strip the HW TX descriptor prepended on submit */
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if (info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				/* undo the TKIP extra-space insertion: move
				 * the 802.11 header back down, then drop the
				 * gap from the front of the skb */
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			/* mark the frame as not acked before reporting it */
			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}
 | 1040 |  | 
 | 1041 | #define WL1271_TX_FLUSH_TIMEOUT 500000 | 
 | 1042 |  | 
 | 1043 | /* caller must *NOT* hold wl->mutex */ | 
 | 1044 | void wl1271_tx_flush(struct wl1271 *wl) | 
 | 1045 | { | 
 | 1046 | 	unsigned long timeout; | 
| Arik Nemtsov | 18aa755 | 2012-02-28 00:41:32 +0200 | [diff] [blame] | 1047 | 	int i; | 
| Juuso Oikarinen | 781608c | 2010-05-24 11:18:17 +0300 | [diff] [blame] | 1048 | 	timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT); | 
 | 1049 |  | 
 | 1050 | 	while (!time_after(jiffies, timeout)) { | 
 | 1051 | 		mutex_lock(&wl->mutex); | 
| Arik Nemtsov | a8c0ddb | 2011-02-23 00:22:26 +0200 | [diff] [blame] | 1052 | 		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d", | 
| Arik Nemtsov | f1a4638 | 2011-07-07 14:25:23 +0300 | [diff] [blame] | 1053 | 			     wl->tx_frames_cnt, | 
 | 1054 | 			     wl1271_tx_total_queue_count(wl)); | 
 | 1055 | 		if ((wl->tx_frames_cnt == 0) && | 
 | 1056 | 		    (wl1271_tx_total_queue_count(wl) == 0)) { | 
| Juuso Oikarinen | 781608c | 2010-05-24 11:18:17 +0300 | [diff] [blame] | 1057 | 			mutex_unlock(&wl->mutex); | 
 | 1058 | 			return; | 
 | 1059 | 		} | 
 | 1060 | 		mutex_unlock(&wl->mutex); | 
 | 1061 | 		msleep(1); | 
 | 1062 | 	} | 
 | 1063 |  | 
 | 1064 | 	wl1271_warning("Unable to flush all TX buffers, timed out."); | 
| Arik Nemtsov | 18aa755 | 2012-02-28 00:41:32 +0200 | [diff] [blame] | 1065 |  | 
 | 1066 | 	/* forcibly flush all Tx buffers on our queues */ | 
 | 1067 | 	mutex_lock(&wl->mutex); | 
 | 1068 | 	for (i = 0; i < WL12XX_MAX_LINKS; i++) | 
 | 1069 | 		wl1271_tx_reset_link_queues(wl, i); | 
 | 1070 | 	mutex_unlock(&wl->mutex); | 
| Luciano Coelho | f5fc0f8 | 2009-08-06 16:25:28 +0300 | [diff] [blame] | 1071 | } | 
| Arik Nemtsov | e0fe371 | 2010-10-16 18:19:53 +0200 | [diff] [blame] | 1072 |  | 
| Eliad Peller | af7fbb2 | 2011-09-19 13:51:42 +0300 | [diff] [blame] | 1073 | u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set) | 
| Arik Nemtsov | e0fe371 | 2010-10-16 18:19:53 +0200 | [diff] [blame] | 1074 | { | 
| Eliad Peller | af7fbb2 | 2011-09-19 13:51:42 +0300 | [diff] [blame] | 1075 | 	if (WARN_ON(!rate_set)) | 
 | 1076 | 		return 0; | 
| Arik Nemtsov | e0fe371 | 2010-10-16 18:19:53 +0200 | [diff] [blame] | 1077 |  | 
| Eliad Peller | af7fbb2 | 2011-09-19 13:51:42 +0300 | [diff] [blame] | 1078 | 	return BIT(__ffs(rate_set)); | 
| Arik Nemtsov | e0fe371 | 2010-10-16 18:19:53 +0200 | [diff] [blame] | 1079 | } |