blob: 9b9044400218e4e59a085272001037993ccda90a [file] [log] [blame]
Michael Buesch5100d5a2008-03-29 21:01:16 +01001/*
2
3 Broadcom B43 wireless driver
4
5 PIO data transfer
6
7 Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
 21 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA.
23
24*/
25
26#include "b43.h"
27#include "pio.h"
28#include "dma.h"
29#include "main.h"
30#include "xmit.h"
31
32#include <linux/delay.h>
Alexey Dobriyand43c36d2009-10-07 17:09:06 +040033#include <linux/sched.h>
Michael Buesch5100d5a2008-03-29 21:01:16 +010034
35
Michael Buesch5100d5a2008-03-29 21:01:16 +010036static u16 generate_cookie(struct b43_pio_txqueue *q,
37 struct b43_pio_txpacket *pack)
38{
39 u16 cookie;
40
41 /* Use the upper 4 bits of the cookie as
42 * PIO controller ID and store the packet index number
43 * in the lower 12 bits.
44 * Note that the cookie must never be 0, as this
45 * is a special value used in RX path.
46 * It can also not be 0xFFFF because that is special
47 * for multicast frames.
48 */
49 cookie = (((u16)q->index + 1) << 12);
50 cookie |= pack->index;
51
52 return cookie;
53}
54
55static
John Daiker99da1852009-02-24 02:16:42 -080056struct b43_pio_txqueue *parse_cookie(struct b43_wldev *dev,
57 u16 cookie,
Michael Buesch5100d5a2008-03-29 21:01:16 +010058 struct b43_pio_txpacket **pack)
59{
60 struct b43_pio *pio = &dev->pio;
61 struct b43_pio_txqueue *q = NULL;
62 unsigned int pack_index;
63
64 switch (cookie & 0xF000) {
65 case 0x1000:
66 q = pio->tx_queue_AC_BK;
67 break;
68 case 0x2000:
69 q = pio->tx_queue_AC_BE;
70 break;
71 case 0x3000:
72 q = pio->tx_queue_AC_VI;
73 break;
74 case 0x4000:
75 q = pio->tx_queue_AC_VO;
76 break;
77 case 0x5000:
78 q = pio->tx_queue_mcast;
79 break;
80 }
81 if (B43_WARN_ON(!q))
82 return NULL;
83 pack_index = (cookie & 0x0FFF);
84 if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
85 return NULL;
86 *pack = &q->packets[pack_index];
87
88 return q;
89}
90
91static u16 index_to_pioqueue_base(struct b43_wldev *dev,
92 unsigned int index)
93{
94 static const u16 bases[] = {
95 B43_MMIO_PIO_BASE0,
96 B43_MMIO_PIO_BASE1,
97 B43_MMIO_PIO_BASE2,
98 B43_MMIO_PIO_BASE3,
99 B43_MMIO_PIO_BASE4,
100 B43_MMIO_PIO_BASE5,
101 B43_MMIO_PIO_BASE6,
102 B43_MMIO_PIO_BASE7,
103 };
104 static const u16 bases_rev11[] = {
105 B43_MMIO_PIO11_BASE0,
106 B43_MMIO_PIO11_BASE1,
107 B43_MMIO_PIO11_BASE2,
108 B43_MMIO_PIO11_BASE3,
109 B43_MMIO_PIO11_BASE4,
110 B43_MMIO_PIO11_BASE5,
111 };
112
113 if (dev->dev->id.revision >= 11) {
114 B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11));
115 return bases_rev11[index];
116 }
117 B43_WARN_ON(index >= ARRAY_SIZE(bases));
118 return bases[index];
119}
120
121static u16 pio_txqueue_offset(struct b43_wldev *dev)
122{
123 if (dev->dev->id.revision >= 11)
124 return 0x18;
125 return 0;
126}
127
128static u16 pio_rxqueue_offset(struct b43_wldev *dev)
129{
130 if (dev->dev->id.revision >= 11)
131 return 0x38;
132 return 8;
133}
134
John Daiker99da1852009-02-24 02:16:42 -0800135static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev,
136 unsigned int index)
Michael Buesch5100d5a2008-03-29 21:01:16 +0100137{
138 struct b43_pio_txqueue *q;
139 struct b43_pio_txpacket *p;
140 unsigned int i;
141
142 q = kzalloc(sizeof(*q), GFP_KERNEL);
143 if (!q)
144 return NULL;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100145 q->dev = dev;
146 q->rev = dev->dev->id.revision;
147 q->mmio_base = index_to_pioqueue_base(dev, index) +
148 pio_txqueue_offset(dev);
149 q->index = index;
150
151 q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS;
152 if (q->rev >= 8) {
153 q->buffer_size = 1920; //FIXME this constant is wrong.
154 } else {
155 q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE);
156 q->buffer_size -= 80;
157 }
158
159 INIT_LIST_HEAD(&q->packets_list);
160 for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
161 p = &(q->packets[i]);
162 INIT_LIST_HEAD(&p->list);
163 p->index = i;
164 p->queue = q;
165 list_add(&p->list, &q->packets_list);
166 }
167
168 return q;
169}
170
John Daiker99da1852009-02-24 02:16:42 -0800171static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev,
172 unsigned int index)
Michael Buesch5100d5a2008-03-29 21:01:16 +0100173{
174 struct b43_pio_rxqueue *q;
175
176 q = kzalloc(sizeof(*q), GFP_KERNEL);
177 if (!q)
178 return NULL;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100179 q->dev = dev;
180 q->rev = dev->dev->id.revision;
181 q->mmio_base = index_to_pioqueue_base(dev, index) +
182 pio_rxqueue_offset(dev);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100183
184 /* Enable Direct FIFO RX (PIO) on the engine. */
185 b43_dma_direct_fifo_rx(dev, index, 1);
186
187 return q;
188}
189
190static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
191{
192 struct b43_pio_txpacket *pack;
193 unsigned int i;
194
195 for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
196 pack = &(q->packets[i]);
197 if (pack->skb) {
198 dev_kfree_skb_any(pack->skb);
199 pack->skb = NULL;
200 }
201 }
202}
203
/* Free a TX queue, dropping any still-pending skbs first.
 * @name: stringified queue name; currently unused, kept for symmetry
 *        with the destroy_queue_tx() macro and future diagnostics.
 * A NULL queue is a no-op. */
static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
				    const char *name)
{
	if (!q)
		return;
	b43_pio_cancel_tx_packets(q);
	kfree(q);
}
212
/* Free an RX queue. @name is currently unused (see destroy_queue_rx()).
 * A NULL queue is a no-op. */
static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
				    const char *name)
{
	if (!q)
		return;
	kfree(q);
}
220
/* Destroy a queue stored in a struct b43_pio member and NULL the
 * pointer afterwards, so repeated teardown (e.g. error unwind followed
 * by b43_pio_free()) cannot double-free. The member name is also
 * stringified and handed to the destroy function for diagnostics. */
#define destroy_queue_tx(pio, queue) do { \
	b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue)); \
	(pio)->queue = NULL; \
  } while (0)

#define destroy_queue_rx(pio, queue) do { \
	b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue)); \
	(pio)->queue = NULL; \
  } while (0)
230
/* Tear down all PIO queues of the device.
 * No-op when the device is not using PIO transfers. Queues are
 * destroyed in reverse order of their creation in b43_pio_init(). */
void b43_pio_free(struct b43_wldev *dev)
{
	struct b43_pio *pio;

	if (!b43_using_pio_transfers(dev))
		return;
	pio = &dev->pio;

	destroy_queue_rx(pio, rx_queue);
	destroy_queue_tx(pio, tx_queue_mcast);
	destroy_queue_tx(pio, tx_queue_AC_VO);
	destroy_queue_tx(pio, tx_queue_AC_VI);
	destroy_queue_tx(pio, tx_queue_AC_BE);
	destroy_queue_tx(pio, tx_queue_AC_BK);
}
246
/* Set up all PIO TX queues (four ACs plus multicast) and the RX queue.
 * Returns 0 on success or -ENOMEM on allocation failure; on failure
 * every queue created so far is torn down again via the goto ladder. */
int b43_pio_init(struct b43_wldev *dev)
{
	struct b43_pio *pio = &dev->pio;
	int err = -ENOMEM;

	/* Clear the Big-Endian bit in MACCTL and set the RX padding
	 * offset in shared memory to zero before creating the queues. */
	b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
		    & ~B43_MACCTL_BE);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RXPADOFF, 0);

	pio->tx_queue_AC_BK = b43_setup_pioqueue_tx(dev, 0);
	if (!pio->tx_queue_AC_BK)
		goto out;

	pio->tx_queue_AC_BE = b43_setup_pioqueue_tx(dev, 1);
	if (!pio->tx_queue_AC_BE)
		goto err_destroy_bk;

	pio->tx_queue_AC_VI = b43_setup_pioqueue_tx(dev, 2);
	if (!pio->tx_queue_AC_VI)
		goto err_destroy_be;

	pio->tx_queue_AC_VO = b43_setup_pioqueue_tx(dev, 3);
	if (!pio->tx_queue_AC_VO)
		goto err_destroy_vi;

	pio->tx_queue_mcast = b43_setup_pioqueue_tx(dev, 4);
	if (!pio->tx_queue_mcast)
		goto err_destroy_vo;

	pio->rx_queue = b43_setup_pioqueue_rx(dev, 0);
	if (!pio->rx_queue)
		goto err_destroy_mcast;

	b43dbg(dev->wl, "PIO initialized\n");
	err = 0;
out:
	return err;

	/* Error unwind: destroy in reverse order of creation. */
err_destroy_mcast:
	destroy_queue_tx(pio, tx_queue_mcast);
err_destroy_vo:
	destroy_queue_tx(pio, tx_queue_AC_VO);
err_destroy_vi:
	destroy_queue_tx(pio, tx_queue_AC_VI);
err_destroy_be:
	destroy_queue_tx(pio, tx_queue_AC_BE);
err_destroy_bk:
	destroy_queue_tx(pio, tx_queue_AC_BK);
	return err;
}
297
298/* Static mapping of mac80211's queues (priorities) to b43 PIO queues. */
John Daiker99da1852009-02-24 02:16:42 -0800299static struct b43_pio_txqueue *select_queue_by_priority(struct b43_wldev *dev,
300 u8 queue_prio)
Michael Buesch5100d5a2008-03-29 21:01:16 +0100301{
302 struct b43_pio_txqueue *q;
303
Michael Buesch403a3a12009-06-08 21:04:57 +0200304 if (dev->qos_enabled) {
Michael Buesch5100d5a2008-03-29 21:01:16 +0100305 /* 0 = highest priority */
306 switch (queue_prio) {
307 default:
308 B43_WARN_ON(1);
309 /* fallthrough */
310 case 0:
311 q = dev->pio.tx_queue_AC_VO;
312 break;
313 case 1:
314 q = dev->pio.tx_queue_AC_VI;
315 break;
316 case 2:
317 q = dev->pio.tx_queue_AC_BE;
318 break;
319 case 3:
320 q = dev->pio.tx_queue_AC_BK;
321 break;
322 }
323 } else
324 q = dev->pio.tx_queue_AC_BE;
325
326 return q;
327}
328
/* Push @data_len bytes into a 2-byte-wide (core rev < 8) TX FIFO.
 * @ctl is the current TXCTL register value; the updated value is
 * returned so the caller can set the EOF flag afterwards.
 * An odd trailing byte is sent through the wl->tx_tail bounce buffer
 * with only the low byte-lane (WRITELO) enabled. */
static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
				u16 ctl,
				const void *_data,
				unsigned int data_len)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	const u8 *data = _data;

	/* Enable both byte lanes for the bulk transfer. */
	ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI;
	b43_piotx_write16(q, B43_PIO_TXCTL, ctl);

	/* Bulk-copy all even bytes as 16-bit words. */
	ssb_block_write(dev->dev, data, (data_len & ~1),
			q->mmio_base + B43_PIO_TXDATA,
			sizeof(u16));
	if (data_len & 1) {
		/* Write the last byte. */
		ctl &= ~B43_PIO_TXCTL_WRITEHI;
		b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
		/* NOTE(review): wl->tx_tail is per-wl scratch shared by
		 * all queues — presumably serialized by the caller's
		 * locking; confirm against the TX path. */
		wl->tx_tail[0] = data[data_len - 1];
		wl->tx_tail[1] = 0;
		ssb_block_write(dev->dev, wl->tx_tail, 2,
				q->mmio_base + B43_PIO_TXDATA,
				sizeof(u16));
	}

	return ctl;
}
357
/* Feed one frame (TX header followed by the skb payload) into a
 * 2-byte-wide (rev < 8) TX FIFO and mark its end with the EOF flag. */
static void pio_tx_frame_2byte_queue(struct b43_pio_txpacket *pack,
				     const u8 *hdr, unsigned int hdrlen)
{
	struct b43_pio_txqueue *q = pack->queue;
	const char *frame = pack->skb->data;
	unsigned int frame_len = pack->skb->len;
	u16 ctl;

	/* Announce the frame (FREADY) but keep EOF clear until all
	 * data has been pushed into the FIFO. */
	ctl = b43_piotx_read16(q, B43_PIO_TXCTL);
	ctl |= B43_PIO_TXCTL_FREADY;
	ctl &= ~B43_PIO_TXCTL_EOF;

	/* Transfer the header data. */
	ctl = tx_write_2byte_queue(q, ctl, hdr, hdrlen);
	/* Transfer the frame data. */
	ctl = tx_write_2byte_queue(q, ctl, frame, frame_len);

	ctl |= B43_PIO_TXCTL_EOF;
	b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
}
378
/* Push @data_len bytes into a 4-byte-wide (core rev >= 8) TX FIFO.
 * @ctl is the current PIO8 TXCTL register value; the updated value is
 * returned so the caller can set the EOF flag afterwards.
 * A 1-3 byte tail is sent through the wl->tx_tail bounce buffer with
 * only the byte-lane enables for the valid bytes set. */
static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
				u32 ctl,
				const void *_data,
				unsigned int data_len)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	const u8 *data = _data;

	/* Enable all four byte lanes for the bulk transfer. */
	ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 |
	       B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31;
	b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);

	/* Bulk-copy all 4-byte-aligned data as 32-bit words. */
	ssb_block_write(dev->dev, data, (data_len & ~3),
			q->mmio_base + B43_PIO8_TXDATA,
			sizeof(u32));
	if (data_len & 3) {
		wl->tx_tail[3] = 0;
		/* Write the last few bytes. */
		ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
			 B43_PIO8_TXCTL_24_31);
		/* Re-enable only the lanes that carry valid tail bytes.
		 * NOTE(review): wl->tx_tail is per-wl scratch shared by
		 * all queues — presumably serialized by the caller's
		 * locking; confirm against the TX path. */
		switch (data_len & 3) {
		case 3:
			ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
			wl->tx_tail[0] = data[data_len - 3];
			wl->tx_tail[1] = data[data_len - 2];
			wl->tx_tail[2] = data[data_len - 1];
			break;
		case 2:
			ctl |= B43_PIO8_TXCTL_8_15;
			wl->tx_tail[0] = data[data_len - 2];
			wl->tx_tail[1] = data[data_len - 1];
			wl->tx_tail[2] = 0;
			break;
		case 1:
			wl->tx_tail[0] = data[data_len - 1];
			wl->tx_tail[1] = 0;
			wl->tx_tail[2] = 0;
			break;
		}
		b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
		ssb_block_write(dev->dev, wl->tx_tail, 4,
				q->mmio_base + B43_PIO8_TXDATA,
				sizeof(u32));
	}

	return ctl;
}
427
428static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
429 const u8 *hdr, unsigned int hdrlen)
430{
431 struct b43_pio_txqueue *q = pack->queue;
432 const char *frame = pack->skb->data;
433 unsigned int frame_len = pack->skb->len;
434 u32 ctl;
435
436 ctl = b43_piotx_read32(q, B43_PIO8_TXCTL);
437 ctl |= B43_PIO8_TXCTL_FREADY;
438 ctl &= ~B43_PIO8_TXCTL_EOF;
439
440 /* Transfer the header data. */
Michael Bueschd8c17e12008-04-02 19:58:20 +0200441 ctl = tx_write_4byte_queue(q, ctl, hdr, hdrlen);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100442 /* Transfer the frame data. */
Michael Bueschd8c17e12008-04-02 19:58:20 +0200443 ctl = tx_write_4byte_queue(q, ctl, frame, frame_len);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100444
445 ctl |= B43_PIO8_TXCTL_EOF;
446 b43_piotx_write32(q, B43_PIO_TXCTL, ctl);
447}
448
/* Transmit one skb on the given PIO queue.
 * Picks a free packet slot, generates the TX header (using the shared
 * wl->txhdr scratch buffer), pushes header+frame into the FIFO, and
 * updates the queue's space/slot accounting. Returns 0 or a negative
 * error from b43_generate_txhdr() (e.g. -ENOKEY). */
static int pio_tx_frame(struct b43_pio_txqueue *q,
			struct sk_buff *skb)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	struct b43_pio_txpacket *pack;
	u16 cookie;
	int err;
	unsigned int hdrlen;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	/* Caller guarantees a free slot exists (checked in b43_pio_tx). */
	B43_WARN_ON(list_empty(&q->packets_list));
	pack = list_entry(q->packets_list.next,
			  struct b43_pio_txpacket, list);

	cookie = generate_cookie(q, pack);
	hdrlen = b43_txhdr_size(dev);
	err = b43_generate_txhdr(dev, (u8 *)&wl->txhdr, skb,
				 info, cookie);
	if (err)
		return err;

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}

	pack->skb = skb;
	/* Rev >= 8 cores have a 4-byte-wide FIFO, older ones 2-byte. */
	if (q->rev >= 8)
		pio_tx_frame_4byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);
	else
		pio_tx_frame_2byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);

	/* Remove it from the list of available packet slots.
	 * It will be put back when we receive the status report. */
	list_del(&pack->list);

	/* Update the queue statistics. */
	q->buffer_used += roundup(skb->len + hdrlen, 4);
	q->free_packet_slots -= 1;

	return 0;
}
494
/* mac80211 TX entry point for PIO mode.
 * Routes the skb to the proper queue (multicast-after-DTIM or by QoS
 * priority), checks queue space, transmits, and stops the mac80211
 * queue when the PIO queue fills up. Returns 0 or a negative errno. */
int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_pio_txqueue *q;
	struct ieee80211_hdr *hdr;
	unsigned int hdrlen, total_len;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast queue will be sent after the DTIM. */
		q = dev->pio.tx_queue_mcast;
		/* Set the frame More-Data bit. Ucode will clear it
		 * for us on the last frame. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
	}

	/* Full on-air length: TX header plus frame, 4-byte aligned. */
	hdrlen = b43_txhdr_size(dev);
	total_len = roundup(skb->len + hdrlen, 4);

	if (unlikely(total_len > q->buffer_size)) {
		/* Frame can never fit this queue's FIFO. */
		err = -ENOBUFS;
		b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
		goto out;
	}
	if (unlikely(q->free_packet_slots == 0)) {
		err = -ENOBUFS;
		b43warn(dev->wl, "PIO: TX packet overflow.\n");
		goto out;
	}
	B43_WARN_ON(q->buffer_used > q->buffer_size);

	if (total_len > (q->buffer_size - q->buffer_used)) {
		/* Not enough memory on the queue. */
		err = -EBUSY;
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = 1;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The mac80211-queue to b43-queue
	 * mapping is static, so we don't need to store it per frame. */
	q->queue_prio = skb_get_queue_mapping(skb);

	err = pio_tx_frame(q, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "PIO transmission failure\n");
		goto out;
	}
	q->nr_tx_packets++;

	B43_WARN_ON(q->buffer_used > q->buffer_size);
	/* Stop the mac80211 queue early if there is no room left for
	 * even a minimal next frame (2+2+6 rounded up) or no slot. */
	if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
	    (q->free_packet_slots == 0)) {
		/* The queue is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = 1;
	}

out:
	return err;
}
569
/* Handle a hardware TX status report.
 * Resolves the cookie back to queue+packet, reports the status to
 * mac80211, returns the packet slot to the free list, and restarts the
 * mac80211 queue if it was stopped for lack of space. */
void b43_pio_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	struct b43_pio_txqueue *q;
	struct b43_pio_txpacket *pack = NULL;
	unsigned int total_len;
	struct ieee80211_tx_info *info;

	q = parse_cookie(dev, status->cookie, &pack);
	if (unlikely(!q))
		return;
	B43_WARN_ON(!pack);

	info = IEEE80211_SKB_CB(pack->skb);

	b43_fill_txstatus_report(dev, info, status);

	/* Release the FIFO space this frame occupied (must mirror the
	 * accounting done in pio_tx_frame()). */
	total_len = pack->skb->len + b43_txhdr_size(dev);
	total_len = roundup(total_len, 4);
	q->buffer_used -= total_len;
	q->free_packet_slots += 1;

	/* Hand the skb back to mac80211; it owns it from here on. */
	ieee80211_tx_status(dev->wl->hw, pack->skb);
	pack->skb = NULL;
	list_add(&pack->list, &q->packets_list);

	if (q->stopped) {
		ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
		q->stopped = 0;
	}
}
601
602void b43_pio_get_tx_stats(struct b43_wldev *dev,
603 struct ieee80211_tx_queue_stats *stats)
604{
605 const int nr_queues = dev->wl->hw->queues;
606 struct b43_pio_txqueue *q;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100607 int i;
608
609 for (i = 0; i < nr_queues; i++) {
Michael Buesch5100d5a2008-03-29 21:01:16 +0100610 q = select_queue_by_priority(dev, i);
611
Johannes Berg57ffc582008-04-29 17:18:59 +0200612 stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
613 stats[i].limit = B43_PIO_MAX_NR_TXPACKETS;
614 stats[i].count = q->nr_tx_packets;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100615 }
616}
617
/* Receive one frame from the PIO RX FIFO.
 * Returns whether we should fetch another frame (nonzero), i.e. zero
 * only when no frame was pending at all. Reads the RX header into the
 * shared wl->rxhdr scratch buffer, validates it, copies the payload
 * into a fresh skb (handling the unaligned tail bytes through
 * wl->rx_tail), and hands the skb to b43_rx(). */
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	u16 len;
	u32 macstat;
	unsigned int i, padding;
	struct sk_buff *skb;
	const char *err_msg = NULL;

	memset(&wl->rxhdr, 0, sizeof(wl->rxhdr));

	/* Check if we have data and wait for it to get ready. */
	if (q->rev >= 8) {
		u32 ctl;

		ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
		if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
			return 0;
		/* Ack FRAMERDY, then poll (up to 10 x 10us) for DATARDY. */
		b43_piorx_write32(q, B43_PIO8_RXCTL,
				  B43_PIO8_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
			if (ctl & B43_PIO8_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	} else {
		u16 ctl;

		ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
		if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write16(q, B43_PIO_RXCTL,
				  B43_PIO_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
			if (ctl & B43_PIO_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	}
	b43dbg(q->dev->wl, "PIO RX timed out\n");
	return 1;
data_ready:

	/* Get the preamble (RX header) */
	if (q->rev >= 8) {
		ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
	} else {
		ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
	}
	/* Sanity checks. */
	len = le16_to_cpu(wl->rxhdr.frame_len);
	if (unlikely(len > 0x700)) {
		err_msg = "len > 0x700";
		goto rx_error;
	}
	if (unlikely(len == 0)) {
		err_msg = "len == 0";
		goto rx_error;
	}

	macstat = le32_to_cpu(wl->rxhdr.mac_status);
	if (macstat & B43_RX_MAC_FCSERR) {
		if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
			/* Drop frames with failed FCS. */
			err_msg = "Frame FCS error";
			goto rx_error;
		}
	}

	/* We always pad 2 bytes, as that's what upstream code expects
	 * due to the RX-header being 30 bytes. In case the frame is
	 * unaligned, we pad another 2 bytes. */
	padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
	skb = dev_alloc_skb(len + padding + 2);
	if (unlikely(!skb)) {
		err_msg = "Out of memory";
		goto rx_error;
	}
	skb_reserve(skb, 2);
	skb_put(skb, len + padding);
	if (q->rev >= 8) {
		/* Bulk-read the aligned part, then pick the 1-3 tail
		 * bytes out of a full 32-bit FIFO word via rx_tail. */
		ssb_block_read(dev->dev, skb->data + padding, (len & ~3),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
		if (len & 3) {
			/* Read the last few bytes. */
			ssb_block_read(dev->dev, wl->rx_tail, 4,
				       q->mmio_base + B43_PIO8_RXDATA,
				       sizeof(u32));
			switch (len & 3) {
			case 3:
				skb->data[len + padding - 3] = wl->rx_tail[0];
				skb->data[len + padding - 2] = wl->rx_tail[1];
				skb->data[len + padding - 1] = wl->rx_tail[2];
				break;
			case 2:
				skb->data[len + padding - 2] = wl->rx_tail[0];
				skb->data[len + padding - 1] = wl->rx_tail[1];
				break;
			case 1:
				skb->data[len + padding - 1] = wl->rx_tail[0];
				break;
			}
		}
	} else {
		ssb_block_read(dev->dev, skb->data + padding, (len & ~1),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
		if (len & 1) {
			/* Read the last byte. */
			ssb_block_read(dev->dev, wl->rx_tail, 2,
				       q->mmio_base + B43_PIO_RXDATA,
				       sizeof(u16));
			skb->data[len + padding - 1] = wl->rx_tail[0];
		}
	}

	b43_rx(q->dev, skb, &wl->rxhdr);

	return 1;

rx_error:
	if (err_msg)
		b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
	/* Ack DATARDY so the hardware can discard the broken frame. */
	b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
	return 1;
}
753
/* Drain the PIO RX FIFO, one frame at a time, until no frame is
 * pending. A hard iteration cap guards against a stuck FRAMERDY bit. */
void b43_pio_rx(struct b43_pio_rxqueue *q)
{
	unsigned int count = 0;

	while (pio_rx_frame(q)) {
		cond_resched();
		if (WARN_ON_ONCE(++count > 10000))
			break;
	}
}
768
769static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
770{
Michael Buesch5100d5a2008-03-29 21:01:16 +0100771 if (q->rev >= 8) {
772 b43_piotx_write32(q, B43_PIO8_TXCTL,
773 b43_piotx_read32(q, B43_PIO8_TXCTL)
774 | B43_PIO8_TXCTL_SUSPREQ);
775 } else {
776 b43_piotx_write16(q, B43_PIO_TXCTL,
777 b43_piotx_read16(q, B43_PIO_TXCTL)
778 | B43_PIO_TXCTL_SUSPREQ);
779 }
Michael Buesch5100d5a2008-03-29 21:01:16 +0100780}
781
782static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
783{
Michael Buesch5100d5a2008-03-29 21:01:16 +0100784 if (q->rev >= 8) {
785 b43_piotx_write32(q, B43_PIO8_TXCTL,
786 b43_piotx_read32(q, B43_PIO8_TXCTL)
787 & ~B43_PIO8_TXCTL_SUSPREQ);
788 } else {
789 b43_piotx_write16(q, B43_PIO_TXCTL,
790 b43_piotx_read16(q, B43_PIO_TXCTL)
791 & ~B43_PIO_TXCTL_SUSPREQ);
792 }
Michael Buesch5100d5a2008-03-29 21:01:16 +0100793}
794
/* Suspend TX on all PIO queues.
 * The device is first forced awake (B43_PS_AWAKE) so the register
 * writes reach the hardware. Mirrored by b43_pio_tx_resume(). */
void b43_pio_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BK);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BE);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VI);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VO);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_mcast);
}
804
/* Resume TX on all PIO queues (reverse order of the suspend path)
 * and drop the forced-awake power-saving bit again. */
void b43_pio_tx_resume(struct b43_wldev *dev)
{
	b43_pio_tx_resume_queue(dev->pio.tx_queue_mcast);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VO);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VI);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BE);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}
813}