/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define INITIAL_IOAT_DESC_COUNT 128

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
static struct ioat_desc_sw *
ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);

static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
						struct ioatdma_device *device,
						int index)
{
	return device->idx[index];
}

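/*
 * Interrupt handling, in outline: the top-level handler reads INTRCTRL,
 * returns IRQ_NONE unless the master interrupt enable and interrupt
 * status bits are set, schedules the cleanup tasklet for every channel
 * flagged in ATTNSTATUS, then writes INTRCTRL back to acknowledge the
 * interrupt. Descriptor cleanup itself is deferred to the tasklets.
 */
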
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_dma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_lookup_chan_by_index(instance, bit);
		tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_dma_chan *ioat_chan = data;

	tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat_dma_cleanup_tasklet(unsigned long data);

/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
		device->idx[i] = ioat_chan;
		tasklet_init(&ioat_chan->cleanup_task,
			     ioat_dma_cleanup_tasklet,
			     (unsigned long) ioat_chan);
		tasklet_disable(&ioat_chan->cleanup_task);
	}
	return device->common.chancnt;
}

static void ioat_set_src(dma_addr_t addr,
			 struct dma_async_tx_descriptor *tx,
			 int index)
{
	tx_to_ioat_desc(tx)->src = addr;
}

static void ioat_set_dest(dma_addr_t addr,
			  struct dma_async_tx_descriptor *tx,
			  int index)
{
	tx_to_ioat_desc(tx)->dst = addr;
}

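/*
 * Submit path, in outline: the transfer recorded in the first descriptor
 * (src, dst, len) is split into xfercap-sized pieces, each piece filling
 * one hardware descriptor. The hardware descriptors are linked through
 * their physical 'next' pointers, a cookie is assigned to the last one,
 * and the new chain is spliced onto used_desc. The APPEND doorbell is
 * only rung once at least four descriptors are pending; any remainder is
 * flushed by ioat_dma_memcpy_issue_pending().
 */
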
static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *prev, *new;
	struct ioat_dma_descriptor *hw;
	int append = 0;
	dma_cookie_t cookie;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	int orig_ack;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_ack = first->async_tx.ack;
	new = first;

	spin_lock_bh(&ioat_chan->desc_lock);
	prev = to_ioat_desc(ioat_chan->used_desc.prev);
	prefetch(prev->hw);
	do {
		copy = min((u32) len, ioat_chan->xfercap);

		new->async_tx.ack = 1;

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;
		hw->next = 0;

		/* chain together the physical address list for the HW */
		wmb();
		prev->hw->next = (u64) new->async_tx.phys;

		len -= copy;
		dst += copy;
		src += copy;

		list_add_tail(&new->node, &new_chain);
		desc_count++;
		prev = new;
	} while (len && (new = ioat_dma_get_next_descriptor(ioat_chan)));

	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	new->tx_cnt = desc_count;
	new->async_tx.ack = orig_ack; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
							first->async_tx.phys;
	__list_splice(&new_chain, ioat_chan->used_desc.prev);

	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= 4) {
		append = 1;
		ioat_chan->pending = 0;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (append)
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	return cookie;
}

static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
					struct ioat_dma_chan *ioat_chan,
					gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	desc_sw->async_tx.tx_set_src = ioat_set_src;
	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
	desc_sw->async_tx.tx_submit = ioat_tx_submit;
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}

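/*
 * Channel bring-up, in outline: program CHANCTRL for error interrupts,
 * clear any stale CHANERR bits, preallocate INITIAL_IOAT_DESC_COUNT
 * descriptors onto free_desc, point the hardware at the per-channel
 * completion writeback area, enable the cleanup tasklet, and prime the
 * chain with a NULL descriptor.
 */
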
/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc = NULL;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return INITIAL_IOAT_DESC_COUNT;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(&ioat_chan->device->pdev->dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	list_splice(&tmp_list, &ioat_chan->free_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&ioat_chan->cleanup_task);
	ioat_dma_start_null_desc(ioat_chan);
	return i;
}

static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	tasklet_disable(&ioat_chan->cleanup_task);
	ioat_dma_memcpy_cleanup(ioat_chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	mdelay(100);

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(&ioat_chan->device->pdev->dev,
			"Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
	ioat_chan->pending = 0;
}

/**
 * ioat_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new = NULL;

	if (!list_empty(&ioat_chan->free_desc)) {
		new = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
		/* will this ever happen? */
		/* TODO add upper limit on these */
		BUG_ON(!new);
	}

	prefetch(new->hw);
	return new;
}

static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
						struct dma_chan *chan,
						size_t len,
						int int_en)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat_dma_get_next_descriptor(ioat_chan);
	new->len = len;
	spin_unlock_bh(&ioat_chan->desc_lock);

	return new ? &new->async_tx : NULL;
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		ioat_chan->pending = 0;
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	}
}

static void ioat_dma_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;
	ioat_dma_memcpy_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

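/*
 * Cleanup, in outline: the hardware continually writes the physical
 * address of the last completed descriptor into the completion writeback
 * area. Cleanup walks used_desc up to that address, unmapping buffers
 * and recycling descriptors the client has acked; the final descriptor
 * is left on the list so new chains can be appended from it.
 * cleanup_lock is taken with trylock so the tasklet and a polling caller
 * do not pile up behind one another.
 */
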
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;

	prefetch(ioat_chan->completion_virt);

	if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations.
	   The descriptor physical addresses are limited to 32 bits
	   when the CPU can only do a 32-bit mov. */

#if (BITS_PER_LONG == 64)
	phys_complete =
		ioat_chan->completion_virt->full
		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete =
		ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((ioat_chan->completion_virt->full
		& IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
			IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(&ioat_chan->device->pdev->dev,
			"Channel halted, chanerr = %x\n",
			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == ioat_chan->last_completion) {
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	cookie = 0;
	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {

		/*
		 * Incoming DMA requests may use multiple descriptors, due to
		 * exceeding xfercap, perhaps. If so, only the last one will
		 * have a cookie, and require unmapping.
		 */
		if (desc->async_tx.cookie) {
			cookie = desc->async_tx.cookie;

			/*
			 * yes we are unmapping both _page and _single alloc'd
			 * regions with unmap_page. Is this *really* that bad?
			 */
			pci_unmap_page(ioat_chan->device->pdev,
				       pci_unmap_addr(desc, dst),
				       pci_unmap_len(desc, len),
				       PCI_DMA_FROMDEVICE);
			pci_unmap_page(ioat_chan->device->pdev,
				       pci_unmap_addr(desc, src),
				       pci_unmap_len(desc, len),
				       PCI_DMA_TODEVICE);
		}

		if (desc->async_tx.phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean up
			 * if the client is done with the descriptor
			 */
			if (desc->async_tx.ack) {
				list_del(&desc->node);
				list_add_tail(&desc->node,
					      &ioat_chan->free_desc);
			} else
				desc->async_tx.cookie = 0;
		} else {
			/*
			 * last used desc. Do not remove, so we can append from
			 * it, but don't look at it next time, either
			 */
			desc->async_tx.cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	ioat_chan->last_completion = phys_complete;
	if (cookie != 0)
		ioat_chan->completed_cookie = cookie;

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

static void ioat_dma_dependency_added(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	spin_lock_bh(&ioat_chan->desc_lock);
	if (ioat_chan->pending == 0) {
		spin_unlock_bh(&ioat_chan->desc_lock);
		ioat_dma_memcpy_cleanup(ioat_chan);
	} else
		spin_unlock_bh(&ioat_chan->desc_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

/* PCI API */

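/*
 * A NULL descriptor (no data movement, completion writeback only) primes
 * a freshly allocated channel: its physical address is written into
 * CHAINADDR and the channel is kicked with CHANCMD_START, which leaves a
 * valid tail descriptor on used_desc for real work to be appended to.
 */
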
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	desc = ioat_dma_get_next_descriptor(ioat_chan);
	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
				| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
				| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	desc->hw->next = 0;
	desc->hw->size = 0;
	desc->hw->src_addr = 0;
	desc->hw->dst_addr = 0;
	desc->async_tx.ack = 1;

	list_add_tail(&desc->node, &ioat_chan->used_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->async_tx.phys) >> 32,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

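/*
 * The self test below also serves as a minimal sketch of the client-side
 * flow for this generation of the dmaengine API (src_dma and dst_dma are
 * illustrative names for dma_map_single() results, not identifiers used
 * elsewhere in this file):
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, len, 0);
 *	tx->tx_set_src(src_dma, tx, 0);
 *	tx->tx_set_dest(dst_dma, tx, 0);
 *	cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 *	... poll chan->device->device_is_tx_complete() for DMA_SUCCESS ...
 */
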
/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx = NULL;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
		dev_err(&device->pdev->dev,
			"selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
	if (!tx) {
		dev_err(&device->pdev->dev,
			"Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
			      DMA_TO_DEVICE);
	ioat_set_src(addr, tx, 0);
	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
			      DMA_FROM_DEVICE);
	ioat_set_dest(addr, tx, 0);
	cookie = ioat_tx_submit(tx);
	if (cookie < 0) {
		dev_err(&device->pdev->dev,
			"Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	ioat_dma_memcpy_issue_pending(dma_chan);
	msleep(1);

	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(&device->pdev->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(&device->pdev->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	ioat_dma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");

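/*
 * Interrupt setup falls back in order: MSI-X with one vector per channel,
 * then MSI-X with a single shared vector, then MSI, then legacy INTx,
 * starting from whatever the ioat_interrupt_style module parameter
 * selects.
 */
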
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int err, i, j, msixcnt;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
		ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);
		err = request_irq(device->msix_entries[i].vector,
				  ioat_dma_do_interrupt_msix,
				  0, "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				ioat_chan =
					ioat_lookup_chan_by_index(device, j);
				free_irq(device->msix_entries[j].vector,
					 ioat_chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = msix_multi_vector;
	goto done;

msix_single_vector:
	device->msix_entries[0].entry = 0;
	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
			  0, "ioat-msix", device);
	if (err) {
		pci_disable_msix(device->pdev);
		goto msi;
	}
	device->irq_mode = msix_single_vector;
	goto done;

msi:
	err = pci_enable_msi(device->pdev);
	if (err)
		goto intx;

	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  0, "ioat-msi", device);
	if (err) {
		pci_disable_msi(device->pdev);
		goto intx;
	}
	/*
	 * CB 1.2 devices need a bit set in configuration space to enable MSI
	 */
	if (device->version == IOAT_VER_1_2) {
		u32 dmactrl;
		pci_read_config_dword(device->pdev,
				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
		pci_write_config_dword(device->pdev,
				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
	}
	device->irq_mode = msi;
	goto done;

intx:
	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;
	device->irq_mode = intx;

done:
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(&device->pdev->dev, "no usable interrupts\n");
	device->irq_mode = none;
	return -1;
}

/**
 * ioat_dma_remove_interrupts - remove whatever interrupts were set
 * @device: ioat device
 */
static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int i;

	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);

	switch (device->irq_mode) {
	case msix_multi_vector:
		for (i = 0; i < device->common.chancnt; i++) {
			ioat_chan = ioat_lookup_chan_by_index(device, i);
			free_irq(device->msix_entries[i].vector, ioat_chan);
		}
		pci_disable_msix(device->pdev);
		break;
	case msix_single_vector:
		free_irq(device->msix_entries[0].vector, device);
		pci_disable_msix(device->pdev);
		break;
	case msi:
		free_irq(device->pdev->irq, device);
		pci_disable_msi(device->pdev);
		break;
	case intx:
		free_irq(device->pdev->irq, device);
		break;
	case none:
		dev_warn(&device->pdev->dev,
			 "call to %s without interrupts setup\n", __func__);
	}
	device->irq_mode = none;
}

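/*
 * Probe, in outline: allocate the device structure, create the descriptor
 * and completion-writeback pci_pools, enumerate the channels, advertise
 * the DMA_MEMCPY capability and operations, set up interrupts, run the
 * self test, and only then register with the dmaengine core.
 * ioat_dma_remove() unwinds the same steps.
 */
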
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
				      void __iomem *iobase)
{
	int err;
	struct ioatdma_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pdev;
	device->reg_base = iobase;
	device->version = readb(device->reg_base + IOAT_VER_OFFSET);

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	INIT_LIST_HEAD(&device->common.channels);
	ioat_dma_enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_alloc_chan_resources =
						ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
						ioat_dma_free_chan_resources;
	device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
	device->common.device_dependency_added = ioat_dma_dependency_added;
	device->common.dev = &pdev->dev;
	dev_err(&device->pdev->dev,
		"Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		device->common.chancnt, device->version, IOAT_DMA_VERSION);

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	dma_async_device_register(&device->common);

	return device;

err_self_test:
	ioat_dma_remove_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	/* device is NULL or already freed here; use pdev for the message */
	dev_err(&pdev->dev,
		"Intel(R) I/OAT DMA Engine initialization failed\n");
	return NULL;
}

void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	ioat_dma_remove_interrupts(device);

	dma_async_device_unregister(&device->common);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	iounmap(device->reg_base);
	pci_release_regions(device->pdev);
	pci_disable_device(device->pdev);

	list_for_each_entry_safe(chan, _chan,
				 &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}