/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary;
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	/*
	 * Make sure the frame has the requested number of bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

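	/*
	 * The resulting buffer layout is: head_size bytes of headroom,
	 * frame_size bytes of frame data and tail_size bytes of tailroom.
	 */
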
	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		dma_addr_t skb_dma;

		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		skbdesc->skb_dma = skb_dma;
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

int rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
		return -ENOMEM;

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

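/*
 * Note: L2 padding keeps the payload 4-byte aligned by inserting up to
 * three pad bytes between the 802.11 header and the payload (for example,
 * a 26-byte QoS data header is followed by 2 pad bytes).
 */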
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	/*
	 * L2 padding is only present if the skb contains more than just the
	 * IEEE 802.11 header.
	 */
	unsigned int l2pad = (skb->len > header_length) ?
				L2PAD_SIZE(header_length) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
		/*
		 * rt2800 has an H/W (or F/W) bug: the device incorrectly
		 * increases the seqno on retransmitted data (non-QoS) frames.
		 * To work around the problem, generate the seqno in software
		 * when QoS is disabled.
		 */
		if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
			__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
		else
			/* H/W will generate sequence number */
			return;
	}

	/*
	 * The hardware is not able to insert a sequence number. Assign a
	 * software generated one here.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

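	/*
	 * The sequence number occupies bits 4-15 of the sequence control
	 * field, so incrementing the counter by 0x10 advances it by one.
	 */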
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
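		/*
		 * GET_DURATION() rounds down; a non-zero remainder means the
		 * duration must be rounded up to the next microsecond.
		 */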
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}

static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						struct ieee80211_sta *sta,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;

	if (sta) {
		txdesc->u.ht.mpdu_density =
		    sta->ht_cap.ampdu_density;

		sta_priv = sta_to_rt2x00_sta(sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
	}

	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
	 * MCS rate to be used.
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

		/*
		 * MIMO PS should be set to 1 for STAs using dynamic SM PS
		 * when using more than one tx stream (>MCS7).
		 */
		if (sta && txdesc->u.ht.mcs > 7 &&
		    sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;

		/* Leave all other settings zero. */
		return;
	}

	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */

	/*
	 * Only one STBC stream is supported for now.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);

	/*
	 * Set 40 MHz mode if necessary (for legacy rates this will
	 * duplicate the frame to both channels).
	 */
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc,
					     struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Header and frame information.
	 */
	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

	/*
	 * Determine rate modulation.
	 */
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    sta, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen: we already checked that the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
	    rt2x00queue_map_txskb(entry))
		return -ENOMEM;

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, which means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Check if we need to kick the queue, there are however a few rules:
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}

static void rt2x00queue_bar_check(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *) (entry->skb->data +
				    rt2x00dev->ops->extra_tx_headroom);
	struct rt2x00_bar_list_entry *bar_entry;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return;

	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

	/*
	 * If the alloc fails we still send the BAR out but just don't track
	 * it in our bar list. As a result we will report it back to mac80211
	 * as failed.
	 */
	if (!bar_entry)
		return;

	bar_entry->entry = entry;
	bar_entry->block_acked = 0;

	/*
	 * Copy the relevant parts of the 802.11 BAR into our check list,
	 * so that we can use RCU for low overhead in the RX path, since
	 * sending BARs and processing the corresponding BlockAck should be
	 * the exception.
	 */
	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
	bar_entry->control = bar->control;
	bar_entry->start_seq_num = bar->start_seq_num;

	/*
	 * Insert BAR into our BAR check list.
	 */
	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
		rt2x00queue_align_frame(skb);

	/*
	 * This function must be called with bh disabled.
	 */
	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		ERROR(queue->rt2x00dev,
		      "Dropping frame due to full tx queue %d.\n", queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	skbdesc->entry = entry;
	entry->skb = skb;

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	/*
	 * Put BlockAckReqs into our check list for driver BA processing.
	 */
	rt2x00queue_bar_check(entry);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
	spin_unlock(&queue->tx_lock);
	return ret;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	/*
	 * Clear beacon (single bssid devices don't need to clear the beacon
	 * since the beacon queue will get stopped anyway).
	 */
	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send beacon to hardware.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	int ret;

	mutex_lock(&intf->beacon_skb_mutex);
	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
	mutex_unlock(&intf->beacon_skb_mutex);

	return ret;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return true;
	}

	/*
	 * Only protect the range we are going to loop over;
	 * if an extra entry is set to pending during our loop,
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	/*
	 * Start from the TX done pointer; this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to disable the queue
		 * inside mac80211.
		 */
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/*
		 * For TX queues, we have to enable the queue
		 * inside mac80211.
		 */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/*
		 * For RX we need to kick the queue now in order to
		 * receive frames.
		 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool started;
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	mutex_lock(&queue->status_lock);

	/*
	 * If the queue has been started, we must stop it temporarily
	 * to prevent any new frames from being queued on the device. If
	 * we are not dropping the pending frames, the queue must
	 * only be stopped in the software and not the hardware,
	 * otherwise the queue will never become empty on its own.
	 */
	started = test_bit(QUEUE_STARTED, &queue->flags);
	if (started) {
		/*
		 * Pause the queue
		 */
		rt2x00queue_pause_queue(queue);

		/*
		 * If we are not supposed to drop any pending
		 * frames, this means we must force a start (=kick)
		 * to the queue to make sure the hardware will
		 * start transmitting.
		 */
		if (!drop && tx_queue)
			queue->rt2x00dev->ops->lib->kick_queue(queue);
	}

	/*
	 * Check if the driver supports flushing; if so, we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative, which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);

	/*
	 * Restore the queue to the previous status
	 */
	if (started)
		rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting everything down
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
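	/*
	 * Use roughly 10% of the entries as the threshold: once fewer
	 * entries than this are available, the queue is kicked regardless
	 * of the burst flag (see rt2x00queue_kick_tx_queue).
	 */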
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

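/*
 * The allocation above holds the queue_entry array immediately followed by
 * one private data area of qdesc->priv_size bytes per entry; this macro
 * returns the address of the private area belonging to entry __index.
 */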
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	 ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
		!!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_VO + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}