/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
					struct queue_entry *entry)
{
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary;
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for the ICV data as tailroom.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}
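
	/*
	 * The resulting buffer layout (worst case, with hardware crypto
	 * enabled) therefore looks like this:
	 *
	 *   | 12 bytes headroom | desc + frame data | 8 bytes tailroom |
	 *
	 * where the headroom absorbs both the 4-byte alignment shift and
	 * the IV/EIV insertion, and the tailroom the ICV data.
	 */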

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure the frame has the requested amount of head and
	 * tail room available.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	/*
	 * If the device has requested headroom, we should make sure that
	 * headroom is also mapped for DMA so it can be used for transferring
	 * additional descriptor information to the hardware.
	 */
	skb_push(skb, rt2x00dev->ops->extra_tx_headroom);

	skbdesc->skb_dma =
	    dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);

	/*
	 * Restore data pointer to original location again.
	 */
	skb_pull(skb, rt2x00dev->ops->extra_tx_headroom);

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	}

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		/*
		 * Add the headroom to the skb length: it has been removed
		 * by the driver, but it was actually mapped for DMA.
		 */
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
				 skb->len + rt2x00dev->ops->extra_tx_headroom,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}

void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	if (!skb)
		return;

	rt2x00queue_unmap_skb(rt2x00dev, skb);
	dev_kfree_skb_any(skb);
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);
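
	/*
	 * ALIGN_SIZE() reports how far skb->data (plus an optional header
	 * offset) sits past the previous 4-byte boundary, i.e. the low two
	 * bits of the address. E.g. a frame starting at address ...0x02
	 * yields align == 2, and the code below shifts it by that amount
	 * onto an aligned address.
	 */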

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, header_length);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
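
	/*
	 * L2PAD_SIZE() gives the padding needed between header and payload
	 * so that the payload starts on a 4-byte boundary once the header
	 * itself is aligned. For example, a 26-byte QoS data header needs
	 * l2pad == 2, turning the frame into:
	 *
	 *   | 26 bytes header | 2 bytes pad | payload |
	 */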

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int l2pad = L2PAD_SIZE(header_length);

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
	    unlikely(!tx_info->control.vif))
		return;

	/*
	 * Hardware should insert sequence counter.
	 * FIXME: We insert a software sequence counter first for
	 * hardware that doesn't support hardware sequence counting.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
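	/*
	 * The sequence number occupies the upper 12 bits of the 16-bit
	 * Sequence Control field, hence the increment by 0x10 above; the
	 * low 4 bits hold the fragment number, which is preserved below.
	 */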
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
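		/*
		 * For OFDM rates the PLCP LENGTH field carries the frame
		 * size in bytes, split here into two 6-bit halves.
		 */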
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);
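		/*
		 * hwrate->bitrate is given in 100 kb/s units. For example,
		 * a frame with data_length == 1024 bytes at 11 Mb/s
		 * (bitrate == 110) is 8192 bits and takes 744.7 us on air,
		 * so the division leaves a residual and the duration is
		 * rounded up to 745 below.
		 */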
293
294 if (residual != 0) {
295 duration++;
296
297 /*
298 * Check if we need to set the Length Extension
299 */
300 if (hwrate->bitrate == 110 && residual <= 30)
301 txdesc->service |= 0x80;
302 }
303
304 txdesc->length_high = (duration >> 8) & 0xff;
305 txdesc->length_low = duration & 0xff;
306
307 /*
308 * When preamble is enabled we should set the
309 * preamble bit for the signal.
310 */
311 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
312 txdesc->signal |= 0x08;
313 }
314}
315
static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from queue
	 */
	txdesc->queue = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/*
	 * Header and frame information.
	 */
	txdesc->length = entry->skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame, except for a frame that has
	 * been injected through a monitor interface. The latter is
	 * needed for testing a monitor interface.
	 */
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
	    (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else
		txdesc->ifs = IFS_SIFS;

	/*
	 * Determine rate modulation.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->rate_mode = RATE_MODE_CCK;
	if (hwrate->flags & DEV_RATE_OFDM)
		txdesc->rate_mode = RATE_MODE_OFDM;

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	enum rt2x00_dump_type dump_type;

	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

	/*
	 * All processing on the frame has been completed; this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	dump_type = (txdesc->queue == QID_BEACON) ?
		    DUMP_FRAME_BEACON : DUMP_FRAME_TX;
	rt2x00debug_dump_frame(rt2x00dev, dump_type, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
				      struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	/*
	 * Check if we need to kick the queue; there are however a few rules:
	 * 1) Don't kick unless this is the last frame in a burst.
	 *    When the burst flag is set, this frame is always followed
	 *    by another frame which is in some way related to it.
	 *    This is true for fragments, RTS or CTS-to-self frames.
	 * 2) Rule 1 can be broken when the available entries
	 *    in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue)))
		return -ENOBUFS;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc;
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array;
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It is possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry,
							       &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_map_txskb(queue->rt2x00dev, skb);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(entry, &txdesc);

	return 0;
}

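/*
 * Usage sketch for rt2x00queue_write_tx_frame() (modelled loosely on
 * rt2x00mac_tx(), not a verbatim copy): the caller looks up the data
 * queue for the frame and stops the corresponding mac80211 queue when
 * the write fails or the queue fills up to its threshold:
 *
 *	struct data_queue *queue =
 *		rt2x00queue_get_queue(rt2x00dev, qid);
 *
 *	if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
 *		ieee80211_stop_queue(rt2x00dev->hw, qid);
 *
 *	if (rt2x00queue_threshold(queue))
 *		ieee80211_stop_queue(rt2x00dev->hw, qid);
 */
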
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif,
			      const bool enable_beacon)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb);
	intf->beacon->skb = NULL;

	if (!enable_beacon) {
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON);
		mutex_unlock(&intf->beacon_skb_mutex);
		return 0;
	}

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb) {
		mutex_unlock(&intf->beacon_skb_mutex);
		return -ENOMEM;
	}

	/*
	 * Copy all TX descriptor information into txdesc;
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Write TX descriptor into reserved room in front of the beacon.
	 */
	rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Send the beacon to the hardware and enable beacon generation.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	if (queue == QID_RX)
		return rt2x00dev->rx;

	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

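	/*
	 * Q_INDEX is advanced when a frame is queued and Q_INDEX_DONE when
	 * one completes, so 'length' counts the frames currently pending
	 * in the queue while 'count' totals all completed frames.
	 */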
	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;
	memset(queue->index, 0, sizeof(queue->index));

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	txall_queue_for_each(rt2x00dev, queue)
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++) {
			queue->entries[i].flags = 0;

			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
		}
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	( ((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)) )
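
	/*
	 * The single allocation above is laid out with all queue_entry
	 * structures first, followed by all per-entry driver private blocks:
	 *
	 *   | entry 0 | ... | entry limit-1 | priv 0 | ... | priv limit-1 |
	 *
	 * so the private data of entry i starts at
	 * base + limit * sizeof(entry) + i * priv_size.
	 */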

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
				  struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		if (queue->entries[i].skb)
			rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
	}
}

static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
				    struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
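	/*
	 * For example, a driver exposing four TX queues and requiring an
	 * ATIM queue ends up with 1 + 4 + 1 + 1 = 7 data queues here.
	 */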

	queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}