blob: 764147dd5aea44f21ac832fbcb2729855a327728 [file] [log] [blame]
Ivo van Doorn95ea3622007-09-25 17:57:13 -07001/*
Ivo van Doorn811aa9c2008-02-03 15:42:53 +01002 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
Ivo van Doorn95ea3622007-09-25 17:57:13 -07003 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00pci
23 Abstract: rt2x00 generic pci device routines.
24 */
25
Ivo van Doorn95ea3622007-09-25 17:57:13 -070026#include <linux/dma-mapping.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30
31#include "rt2x00.h"
32#include "rt2x00pci.h"
33
34/*
35 * Beacon handlers.
36 */
37int rt2x00pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
38 struct ieee80211_tx_control *control)
39{
40 struct rt2x00_dev *rt2x00dev = hw->priv;
Ivo van Doorn6bb40dd2008-02-03 15:49:59 +010041 struct rt2x00_intf *intf = vif_to_intf(control->vif);
Ivo van Doorn181d6902008-02-05 16:42:23 -050042 struct queue_entry_priv_pci_tx *priv_tx;
43 struct skb_frame_desc *skbdesc;
Ivo van Doorn95ea3622007-09-25 17:57:13 -070044
Ivo van Doorn6bb40dd2008-02-03 15:49:59 +010045 if (unlikely(!intf->beacon))
46 return -ENOBUFS;
47
48 priv_tx = intf->beacon->priv_data;
Ivo van Doorn95ea3622007-09-25 17:57:13 -070049
50 /*
Ivo van Doorn08992f72008-01-24 01:56:25 -080051 * Fill in skb descriptor
Ivo van Doorn95ea3622007-09-25 17:57:13 -070052 */
Ivo van Doorn181d6902008-02-05 16:42:23 -050053 skbdesc = get_skb_frame_desc(skb);
54 memset(skbdesc, 0, sizeof(*skbdesc));
55 skbdesc->data = skb->data;
Ivo van Doorn6bb40dd2008-02-03 15:49:59 +010056 skbdesc->data_len = skb->len;
Ivo van Doorn181d6902008-02-05 16:42:23 -050057 skbdesc->desc = priv_tx->desc;
Ivo van Doorn6bb40dd2008-02-03 15:49:59 +010058 skbdesc->desc_len = intf->beacon->queue->desc_size;
59 skbdesc->entry = intf->beacon;
Ivo van Doorn08992f72008-01-24 01:56:25 -080060
Ivo van Doorn6bb40dd2008-02-03 15:49:59 +010061 /*
62 * Just in case mac80211 doesn't set this correctly,
63 * but we need this queue set for the descriptor
64 * initialization.
65 */
66 control->queue = IEEE80211_TX_QUEUE_BEACON;
Ivo van Doorn08992f72008-01-24 01:56:25 -080067 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
Ivo van Doorn95ea3622007-09-25 17:57:13 -070068
69 /*
70 * Enable beacon generation.
Ivo van Doorn6bb40dd2008-02-03 15:49:59 +010071 * Write entire beacon with descriptor to register,
72 * and kick the beacon generator.
Ivo van Doorn95ea3622007-09-25 17:57:13 -070073 */
Ivo van Doorn6bb40dd2008-02-03 15:49:59 +010074 memcpy(priv_tx->data, skb->data, skb->len);
Ivo van Doorn95ea3622007-09-25 17:57:13 -070075 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue);
76
77 return 0;
78}
79EXPORT_SYMBOL_GPL(rt2x00pci_beacon_update);
80
81/*
82 * TX data handlers.
83 */
84int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
Ivo van Doorn181d6902008-02-05 16:42:23 -050085 struct data_queue *queue, struct sk_buff *skb,
Ivo van Doorn95ea3622007-09-25 17:57:13 -070086 struct ieee80211_tx_control *control)
87{
Ivo van Doorn181d6902008-02-05 16:42:23 -050088 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
89 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
90 struct skb_frame_desc *skbdesc;
Ivo van Doorn95ea3622007-09-25 17:57:13 -070091 u32 word;
92
Ivo van Doorn181d6902008-02-05 16:42:23 -050093 if (rt2x00queue_full(queue))
Ivo van Doorn95ea3622007-09-25 17:57:13 -070094 return -EINVAL;
Ivo van Doorn95ea3622007-09-25 17:57:13 -070095
Ivo van Doorn181d6902008-02-05 16:42:23 -050096 rt2x00_desc_read(priv_tx->desc, 0, &word);
Ivo van Doorn95ea3622007-09-25 17:57:13 -070097
98 if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
99 rt2x00_get_field32(word, TXD_ENTRY_VALID)) {
100 ERROR(rt2x00dev,
101 "Arrived at non-free entry in the non-full queue %d.\n"
102 "Please file bug report to %s.\n",
103 control->queue, DRV_PROJECT);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700104 return -EINVAL;
105 }
106
Ivo van Doorn08992f72008-01-24 01:56:25 -0800107 /*
108 * Fill in skb descriptor
109 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500110 skbdesc = get_skb_frame_desc(skb);
111 memset(skbdesc, 0, sizeof(*skbdesc));
112 skbdesc->data = skb->data;
113 skbdesc->data_len = queue->data_size;
114 skbdesc->desc = priv_tx->desc;
115 skbdesc->desc_len = queue->desc_size;
116 skbdesc->entry = entry;
Ivo van Doorn08992f72008-01-24 01:56:25 -0800117
Ivo van Doorn181d6902008-02-05 16:42:23 -0500118 memcpy(priv_tx->data, skb->data, skb->len);
Ivo van Doorn08992f72008-01-24 01:56:25 -0800119 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700120
Ivo van Doorn181d6902008-02-05 16:42:23 -0500121 rt2x00queue_index_inc(queue, Q_INDEX);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700122
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700123 return 0;
124}
125EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
126
127/*
Ivo van Doorn3957ccb2007-11-12 15:02:40 +0100128 * TX/RX data handlers.
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700129 */
130void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
131{
Ivo van Doorn181d6902008-02-05 16:42:23 -0500132 struct data_queue *queue = rt2x00dev->rx;
133 struct queue_entry *entry;
134 struct queue_entry_priv_pci_rx *priv_rx;
Ivo van Doornc5d0dc52008-01-06 23:40:27 +0100135 struct ieee80211_hdr *hdr;
Ivo van Doorn181d6902008-02-05 16:42:23 -0500136 struct skb_frame_desc *skbdesc;
137 struct rxdone_entry_desc rxdesc;
Ivo van Doornc5d0dc52008-01-06 23:40:27 +0100138 int header_size;
139 int align;
Johannes Berg4150c572007-09-17 01:29:23 -0400140 u32 word;
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700141
142 while (1) {
Ivo van Doorn181d6902008-02-05 16:42:23 -0500143 entry = rt2x00queue_get_entry(queue, Q_INDEX);
144 priv_rx = entry->priv_data;
145 rt2x00_desc_read(priv_rx->desc, 0, &word);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700146
Johannes Berg4150c572007-09-17 01:29:23 -0400147 if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700148 break;
149
Ivo van Doorn181d6902008-02-05 16:42:23 -0500150 memset(&rxdesc, 0, sizeof(rxdesc));
151 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700152
Ivo van Doorn181d6902008-02-05 16:42:23 -0500153 hdr = (struct ieee80211_hdr *)priv_rx->data;
Ivo van Doornc5d0dc52008-01-06 23:40:27 +0100154 header_size =
155 ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
156
157 /*
158 * The data behind the ieee80211 header must be
159 * aligned on a 4 byte boundary.
160 */
Ivo van Doornd101f642008-01-11 20:53:07 +0100161 align = header_size % 4;
Ivo van Doornc5d0dc52008-01-06 23:40:27 +0100162
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700163 /*
164 * Allocate the sk_buffer, initialize it and copy
165 * all data into it.
166 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500167 entry->skb = dev_alloc_skb(rxdesc.size + align);
168 if (!entry->skb)
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700169 return;
170
Ivo van Doorn181d6902008-02-05 16:42:23 -0500171 skb_reserve(entry->skb, align);
172 memcpy(skb_put(entry->skb, rxdesc.size),
173 priv_rx->data, rxdesc.size);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700174
175 /*
Ivo van Doorn08992f72008-01-24 01:56:25 -0800176 * Fill in skb descriptor
177 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500178 skbdesc = get_skb_frame_desc(entry->skb);
179 memset(skbdesc, 0, sizeof(*skbdesc));
180 skbdesc->data = entry->skb->data;
181 skbdesc->data_len = queue->data_size;
182 skbdesc->desc = priv_rx->desc;
183 skbdesc->desc_len = queue->desc_size;
Ivo van Doorn08992f72008-01-24 01:56:25 -0800184 skbdesc->entry = entry;
185
186 /*
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700187 * Send the frame to rt2x00lib for further processing.
188 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500189 rt2x00lib_rxdone(entry, &rxdesc);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700190
Ivo van Doorn181d6902008-02-05 16:42:23 -0500191 if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
Johannes Berg4150c572007-09-17 01:29:23 -0400192 rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
Ivo van Doorn181d6902008-02-05 16:42:23 -0500193 rt2x00_desc_write(priv_rx->desc, 0, word);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700194 }
195
Ivo van Doorn181d6902008-02-05 16:42:23 -0500196 rt2x00queue_index_inc(queue, Q_INDEX);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700197 }
198}
199EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
200
Ivo van Doorn181d6902008-02-05 16:42:23 -0500201void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
202 struct txdone_entry_desc *txdesc)
Ivo van Doorn3957ccb2007-11-12 15:02:40 +0100203{
Ivo van Doorn181d6902008-02-05 16:42:23 -0500204 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
Ivo van Doorn3957ccb2007-11-12 15:02:40 +0100205 u32 word;
206
Ivo van Doorn181d6902008-02-05 16:42:23 -0500207 txdesc->control = &priv_tx->control;
208 rt2x00lib_txdone(entry, txdesc);
Ivo van Doorn3957ccb2007-11-12 15:02:40 +0100209
210 /*
211 * Make this entry available for reuse.
212 */
213 entry->flags = 0;
214
Ivo van Doorn181d6902008-02-05 16:42:23 -0500215 rt2x00_desc_read(priv_tx->desc, 0, &word);
Ivo van Doorn3957ccb2007-11-12 15:02:40 +0100216 rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
217 rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
Ivo van Doorn181d6902008-02-05 16:42:23 -0500218 rt2x00_desc_write(priv_tx->desc, 0, word);
Ivo van Doorn3957ccb2007-11-12 15:02:40 +0100219
Ivo van Doorn181d6902008-02-05 16:42:23 -0500220 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
Ivo van Doorn3957ccb2007-11-12 15:02:40 +0100221
222 /*
Ivo van Doorn181d6902008-02-05 16:42:23 -0500223 * If the data queue was full before the txdone handler
Ivo van Doorn3957ccb2007-11-12 15:02:40 +0100224 * we must make sure the packet queue in the mac80211 stack
225 * is reenabled when the txdone handler has finished.
226 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500227 if (!rt2x00queue_full(entry->queue))
228 ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);
Ivo van Doorn3957ccb2007-11-12 15:02:40 +0100229
230}
231EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
232
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700233/*
234 * Device initialization handlers.
235 */
/*
 * Each queue uses one coherent DMA block laid out as
 * [limit descriptors][limit data buffers]; the helpers below compute
 * the total block size and the per-entry offsets into that block.
 */
#define dma_size(__queue) \
	((__queue)->limit * \
	 ((__queue)->desc_size + (__queue)->data_size))

#define priv_offset(__queue, __base, __i) \
	((__base) + ((__i) * (__queue)->desc_size))

#define data_addr_offset(__queue, __base, __i) \
	((__base) + \
	 ((__queue)->limit * (__queue)->desc_size) + \
	 ((__i) * (__queue)->data_size))

#define data_dma_offset(__queue, __base, __i) \
	((__base) + \
	 ((__queue)->limit * (__queue)->desc_size) + \
	 ((__i) * (__queue)->data_size))
260
261static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
262 struct data_queue *queue)
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700263{
Ivo van Doorn181d6902008-02-05 16:42:23 -0500264 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
265 struct queue_entry_priv_pci_tx *priv_tx;
266 void *data_addr;
267 dma_addr_t data_dma;
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700268 unsigned int i;
269
270 /*
271 * Allocate DMA memory for descriptor and buffer.
272 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500273 data_addr = pci_alloc_consistent(pci_dev, dma_size(queue), &data_dma);
274 if (!data_addr)
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700275 return -ENOMEM;
276
277 /*
Ivo van Doorn181d6902008-02-05 16:42:23 -0500278 * Initialize all queue entries to contain valid addresses.
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700279 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500280 for (i = 0; i < queue->limit; i++) {
281 priv_tx = queue->entries[i].priv_data;
282 priv_tx->desc = priv_offset(queue, data_addr, i);
283 priv_tx->data = data_addr_offset(queue, data_addr, i);
284 priv_tx->dma = data_dma_offset(queue, data_dma, i);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700285 }
286
287 return 0;
288}
289
Ivo van Doorn181d6902008-02-05 16:42:23 -0500290static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
291 struct data_queue *queue)
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700292{
Ivo van Doorn181d6902008-02-05 16:42:23 -0500293 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
294 struct queue_entry_priv_pci_tx *priv_tx = queue->entries[0].priv_data;
295
296 if (priv_tx->data)
297 pci_free_consistent(pci_dev, dma_size(queue),
298 priv_tx->data, priv_tx->dma);
299 priv_tx->data = NULL;
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700300}
301
302int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
303{
304 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
Ivo van Doorn181d6902008-02-05 16:42:23 -0500305 struct data_queue *queue;
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700306 int status;
307
308 /*
309 * Allocate DMA
310 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500311 queue_for_each(rt2x00dev, queue) {
312 status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700313 if (status)
314 goto exit;
315 }
316
317 /*
318 * Register interrupt handler.
319 */
320 status = request_irq(pci_dev->irq, rt2x00dev->ops->lib->irq_handler,
321 IRQF_SHARED, pci_name(pci_dev), rt2x00dev);
322 if (status) {
323 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
324 pci_dev->irq, status);
325 return status;
326 }
327
328 return 0;
329
330exit:
331 rt2x00pci_uninitialize(rt2x00dev);
332
333 return status;
334}
335EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
336
337void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
338{
Ivo van Doorn181d6902008-02-05 16:42:23 -0500339 struct data_queue *queue;
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700340
341 /*
342 * Free irq line.
343 */
344 free_irq(rt2x00dev_pci(rt2x00dev)->irq, rt2x00dev);
345
346 /*
347 * Free DMA
348 */
Ivo van Doorn181d6902008-02-05 16:42:23 -0500349 queue_for_each(rt2x00dev, queue)
350 rt2x00pci_free_queue_dma(rt2x00dev, queue);
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700351}
352EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
353
354/*
355 * PCI driver handlers.
356 */
357static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
358{
359 kfree(rt2x00dev->rf);
360 rt2x00dev->rf = NULL;
361
362 kfree(rt2x00dev->eeprom);
363 rt2x00dev->eeprom = NULL;
364
365 if (rt2x00dev->csr_addr) {
366 iounmap(rt2x00dev->csr_addr);
367 rt2x00dev->csr_addr = NULL;
368 }
369}
370
371static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
372{
373 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
374
375 rt2x00dev->csr_addr = ioremap(pci_resource_start(pci_dev, 0),
376 pci_resource_len(pci_dev, 0));
377 if (!rt2x00dev->csr_addr)
378 goto exit;
379
380 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
381 if (!rt2x00dev->eeprom)
382 goto exit;
383
384 rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
385 if (!rt2x00dev->rf)
386 goto exit;
387
388 return 0;
389
390exit:
391 ERROR_PROBE("Failed to allocate registers.\n");
392
393 rt2x00pci_free_reg(rt2x00dev);
394
395 return -ENOMEM;
396}
397
398int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
399{
400 struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
401 struct ieee80211_hw *hw;
402 struct rt2x00_dev *rt2x00dev;
403 int retval;
404
405 retval = pci_request_regions(pci_dev, pci_name(pci_dev));
406 if (retval) {
407 ERROR_PROBE("PCI request regions failed.\n");
408 return retval;
409 }
410
411 retval = pci_enable_device(pci_dev);
412 if (retval) {
413 ERROR_PROBE("Enable device failed.\n");
414 goto exit_release_regions;
415 }
416
417 pci_set_master(pci_dev);
418
419 if (pci_set_mwi(pci_dev))
420 ERROR_PROBE("MWI not available.\n");
421
422 if (pci_set_dma_mask(pci_dev, DMA_64BIT_MASK) &&
423 pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
424 ERROR_PROBE("PCI DMA not supported.\n");
425 retval = -EIO;
426 goto exit_disable_device;
427 }
428
429 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
430 if (!hw) {
431 ERROR_PROBE("Failed to allocate hardware.\n");
432 retval = -ENOMEM;
433 goto exit_disable_device;
434 }
435
436 pci_set_drvdata(pci_dev, hw);
437
438 rt2x00dev = hw->priv;
439 rt2x00dev->dev = pci_dev;
440 rt2x00dev->ops = ops;
441 rt2x00dev->hw = hw;
442
443 retval = rt2x00pci_alloc_reg(rt2x00dev);
444 if (retval)
445 goto exit_free_device;
446
447 retval = rt2x00lib_probe_dev(rt2x00dev);
448 if (retval)
449 goto exit_free_reg;
450
451 return 0;
452
453exit_free_reg:
454 rt2x00pci_free_reg(rt2x00dev);
455
456exit_free_device:
457 ieee80211_free_hw(hw);
458
459exit_disable_device:
460 if (retval != -EBUSY)
461 pci_disable_device(pci_dev);
462
463exit_release_regions:
464 pci_release_regions(pci_dev);
465
466 pci_set_drvdata(pci_dev, NULL);
467
468 return retval;
469}
470EXPORT_SYMBOL_GPL(rt2x00pci_probe);
471
472void rt2x00pci_remove(struct pci_dev *pci_dev)
473{
474 struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
475 struct rt2x00_dev *rt2x00dev = hw->priv;
476
477 /*
478 * Free all allocated data.
479 */
480 rt2x00lib_remove_dev(rt2x00dev);
481 rt2x00pci_free_reg(rt2x00dev);
482 ieee80211_free_hw(hw);
483
484 /*
485 * Free the PCI device data.
486 */
487 pci_set_drvdata(pci_dev, NULL);
488 pci_disable_device(pci_dev);
489 pci_release_regions(pci_dev);
490}
491EXPORT_SYMBOL_GPL(rt2x00pci_remove);
492
493#ifdef CONFIG_PM
494int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
495{
496 struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
497 struct rt2x00_dev *rt2x00dev = hw->priv;
498 int retval;
499
500 retval = rt2x00lib_suspend(rt2x00dev, state);
501 if (retval)
502 return retval;
503
504 rt2x00pci_free_reg(rt2x00dev);
505
506 pci_save_state(pci_dev);
507 pci_disable_device(pci_dev);
508 return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
509}
510EXPORT_SYMBOL_GPL(rt2x00pci_suspend);
511
512int rt2x00pci_resume(struct pci_dev *pci_dev)
513{
514 struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
515 struct rt2x00_dev *rt2x00dev = hw->priv;
516 int retval;
517
518 if (pci_set_power_state(pci_dev, PCI_D0) ||
519 pci_enable_device(pci_dev) ||
520 pci_restore_state(pci_dev)) {
521 ERROR(rt2x00dev, "Failed to resume device.\n");
522 return -EIO;
523 }
524
525 retval = rt2x00pci_alloc_reg(rt2x00dev);
526 if (retval)
527 return retval;
528
529 retval = rt2x00lib_resume(rt2x00dev);
530 if (retval)
531 goto exit_free_reg;
532
533 return 0;
534
535exit_free_reg:
536 rt2x00pci_free_reg(rt2x00dev);
537
538 return retval;
539}
540EXPORT_SYMBOL_GPL(rt2x00pci_resume);
541#endif /* CONFIG_PM */
542
543/*
544 * rt2x00pci module information.
545 */
546MODULE_AUTHOR(DRV_PROJECT);
547MODULE_VERSION(DRV_VERSION);
Ivo van Doorn181d6902008-02-05 16:42:23 -0500548MODULE_DESCRIPTION("rt2x00 pci library");
Ivo van Doorn95ea3622007-09-25 17:57:13 -0700549MODULE_LICENSE("GPL");