/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

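/*
 * Channel i's registers sit at device->reg_base + 0x80 * (i + 1) (see
 * ioat_dma_enumerate_channels() below), so the 1-based channel number
 * can be recovered from the register offset alone, as chan_num() does.
 */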
#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
static int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

#define RESET_DELAY  msecs_to_jiffies(100)
#define WATCHDOG_DELAY  round_jiffies(msecs_to_jiffies(2000))
static void ioat_dma_chan_reset_part2(struct work_struct *work);
static void ioat_dma_chan_watchdog(struct work_struct *work);

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);

static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
						struct ioatdma_device *device,
						int index)
{
	return device->idx[index];
}

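/*
 * Two interrupt schemes are supported: a single shared vector, where the
 * handler scans the attention status register for the signalling channels,
 * and MSI-X, where each channel has its own vector and its handler only
 * needs to kick that channel's cleanup tasklet.
 */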
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_dma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_lookup_chan_by_index(instance, bit);
		tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_dma_chan *ioat_chan = data;

	tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat_dma_cleanup_tasklet(unsigned long data);

/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	/*
	 * IOAT ver.3 workarounds
	 */
	if (device->version == IOAT_VER_3_0) {
		u32 chan_err_mask;
		u16 dev_id;
		u32 dmauncerrsts;

		/*
		 * Write CHANERRMSK_INT with 3E07h to mask out the errors
		 * that can cause stability issues for IOAT ver.3
		 */
		chan_err_mask = 0x3E07;
		pci_write_config_dword(device->pdev,
			IOAT_PCI_CHANERRMASK_INT_OFFSET,
			chan_err_mask);

		/*
		 * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(device->pdev,
			IOAT_PCI_DEVICE_ID_OFFSET,
			&dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			dmauncerrsts = 0x10;
			pci_write_config_dword(device->pdev,
				IOAT_PCI_DMAUNCERRSTS_OFFSET,
				dmauncerrsts);
		}
	}

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	device->common.chancnt--;
#endif
	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		ioat_chan->desccount = 0;
		INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
		if (ioat_chan->device->version != IOAT_VER_1_2) {
			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
					| IOAT_DMA_DCA_ANY_CPU,
				ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
		}
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
		device->idx[i] = ioat_chan;
		tasklet_init(&ioat_chan->cleanup_task,
			     ioat_dma_cleanup_tasklet,
			     (unsigned long) ioat_chan);
		tasklet_disable(&ioat_chan->cleanup_task);
	}
	return device->common.chancnt;
}

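/*
 * Descriptor submission is batched: tx_submit() queues descriptors under
 * desc_lock but only notifies the hardware (an APPEND command on ver.1.2,
 * a dmacount write on ver.2.0+) once ioat_chan->pending reaches
 * ioat_pending_level; the issue_pending entry points below flush whatever
 * is still queued regardless of the high-water mark.
 */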
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void __ioat1_dma_memcpy_issue_pending(
						struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending > 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}

static inline void __ioat2_dma_memcpy_issue_pending(
						struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
}

static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending > 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}

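/*
 * Channel reset is split in two: ioat_dma_reset_channel() issues the reset
 * command and schedules this second half after RESET_DELAY (100 ms) instead
 * of sleeping; the part-2 handler then re-arms the channel with whatever
 * descriptors were still outstanding at reset time.
 */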
/**
 * ioat_dma_chan_reset_part2 - reinit the channel after a reset
 */
static void ioat_dma_chan_reset_part2(struct work_struct *work)
{
	struct ioat_dma_chan *ioat_chan =
		container_of(work, struct ioat_dma_chan, work.work);
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->desc_lock);

	ioat_chan->completion_virt->low = 0;
	ioat_chan->completion_virt->high = 0;
	ioat_chan->pending = 0;

	/*
	 * count the descriptors waiting, and be sure to do it
	 * right for both the CB1 line and the CB2 ring
	 */
	ioat_chan->dmacount = 0;
	if (ioat_chan->used_desc.prev) {
		desc = to_ioat_desc(ioat_chan->used_desc.prev);
		do {
			ioat_chan->dmacount++;
			desc = to_ioat_desc(desc->node.next);
		} while (&desc->node != ioat_chan->used_desc.next);
	}

	/*
	 * write the new starting descriptor address
	 * this puts channel engine into ARMED state
	 */
	desc = to_ioat_desc(ioat_chan->used_desc.prev);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
		break;
	case IOAT_VER_2_0:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

		/* tell the engine to go with what's left to be done */
		writew(ioat_chan->dmacount,
		       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);

		break;
	}
	dev_err(&ioat_chan->device->pdev->dev,
		"chan%d reset - %d descs waiting, %d total desc\n",
		chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);

	spin_unlock_bh(&ioat_chan->desc_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

/**
 * ioat_dma_reset_channel - restart a channel
 * @ioat_chan: IOAT DMA channel handle
 */
static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
{
	u32 chansts, chanerr;

	if (!ioat_chan->used_desc.prev)
		return;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	chansts = (ioat_chan->completion_virt->low
					& IOAT_CHANSTS_DMA_TRANSFER_STATUS);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(ioat_chan), chansts, chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	spin_lock_bh(&ioat_chan->desc_lock);
	ioat_chan->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base
	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* schedule the 2nd half instead of sleeping a long time */
	schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
}

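/*
 * The watchdog runs every WATCHDOG_DELAY (2 s) per device and looks for
 * channels whose completion state has not advanced since the last pass,
 * distinguishing a stalled completion writeback (patched up from CHANSTS)
 * from a genuinely stuck channel (which gets reset).
 */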
/**
 * ioat_dma_chan_watchdog - watch for stuck channels
 */
static void ioat_dma_chan_watchdog(struct work_struct *work)
{
	struct ioatdma_device *device =
		container_of(work, struct ioatdma_device, work.work);
	struct ioat_dma_chan *ioat_chan;
	int i;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} completion_hw;
	unsigned long compl_desc_addr_hw;

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);

		if (ioat_chan->device->version == IOAT_VER_1_2
			/* have we started processing anything yet */
		    && ioat_chan->last_completion
			/* have we completed any since last watchdog cycle? */
		    && (ioat_chan->last_completion ==
				ioat_chan->watchdog_completion)
			/* has TCP stuck on one cookie since last watchdog? */
		    && (ioat_chan->watchdog_tcp_cookie ==
				ioat_chan->watchdog_last_tcp_cookie)
		    && (ioat_chan->watchdog_tcp_cookie !=
				ioat_chan->completed_cookie)
			/* is there something in the chain to be processed? */
			/* CB1 chain always has at least the last one processed */
		    && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
		    && ioat_chan->pending == 0) {

			/*
			 * check CHANSTS register for completed
			 * descriptor address.
			 * if it is different than completion writeback,
			 * it is not zero
			 * and it has changed since the last watchdog
			 * we can assume that channel
			 * is still working correctly
			 * and the problem is in completion writeback.
			 * update completion writeback
			 * with actual CHANSTS value
			 * else
			 * try resetting the channel
			 */

			completion_hw.low = readl(ioat_chan->reg_base +
				IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
			completion_hw.high = readl(ioat_chan->reg_base +
				IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
#if (BITS_PER_LONG == 64)
			compl_desc_addr_hw =
				completion_hw.full
				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
			compl_desc_addr_hw =
				completion_hw.low & IOAT_LOW_COMPLETION_MASK;
#endif

			if ((compl_desc_addr_hw != 0)
			    && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
			    && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
				ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
				ioat_chan->completion_virt->low = completion_hw.low;
				ioat_chan->completion_virt->high = completion_hw.high;
			} else {
				ioat_dma_reset_channel(ioat_chan);
				ioat_chan->watchdog_completion = 0;
				ioat_chan->last_compl_desc_addr_hw = 0;
			}

		/*
		 * for version 2.0 if there are descriptors yet to be processed
		 * and the last completed hasn't changed since the last watchdog
		 *	if they haven't hit the pending level
		 *		issue the pending to push them through
		 *	else
		 *		try resetting the channel
		 */
		} else if (ioat_chan->device->version == IOAT_VER_2_0
		    && ioat_chan->used_desc.prev
		    && ioat_chan->last_completion
		    && ioat_chan->last_completion == ioat_chan->watchdog_completion) {

			if (ioat_chan->pending < ioat_pending_level)
				ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
			else {
				ioat_dma_reset_channel(ioat_chan);
				ioat_chan->watchdog_completion = 0;
			}
		} else {
			ioat_chan->last_compl_desc_addr_hw = 0;
			ioat_chan->watchdog_completion
					= ioat_chan->last_completion;
		}

		ioat_chan->watchdog_last_tcp_cookie =
			ioat_chan->watchdog_tcp_cookie;
	}

	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
}

static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *prev, *new;
	struct ioat_dma_descriptor *hw;
	dma_cookie_t cookie;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	unsigned long orig_flags;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_flags = first->async_tx.flags;
	new = first;

	spin_lock_bh(&ioat_chan->desc_lock);
	prev = to_ioat_desc(ioat_chan->used_desc.prev);
	prefetch(prev->hw);
	do {
		copy = min_t(size_t, len, ioat_chan->xfercap);

		async_tx_ack(&new->async_tx);

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;
		hw->next = 0;

		/* chain together the physical address list for the HW */
		wmb();
		prev->hw->next = (u64) new->async_tx.phys;

		len -= copy;
		dst += copy;
		src += copy;

		list_add_tail(&new->node, &new_chain);
		desc_count++;
		prev = new;
	} while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));

	if (!new) {
		dev_err(&ioat_chan->device->pdev->dev,
			"tx submit failed\n");
		spin_unlock_bh(&ioat_chan->desc_lock);
		return -ENOMEM;
	}

	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	if (new->async_tx.callback) {
		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
		if (first != new) {
			/* move callback to the last desc */
			new->async_tx.callback = first->async_tx.callback;
			new->async_tx.callback_param
					= first->async_tx.callback_param;
			first->async_tx.callback = NULL;
			first->async_tx.callback_param = NULL;
		}
	}

	new->tx_cnt = desc_count;
	new->async_tx.flags = orig_flags; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

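	/*
	 * dma_cookie_t is signed and negative values are reserved for error
	 * codes, so on overflow the counter wraps back to 1 rather than
	 * going negative.
	 */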
	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
							first->async_tx.phys;
	list_splice_tail(&new_chain, &ioat_chan->used_desc);

	ioat_chan->dmacount += desc_count;
	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}

static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *new;
	struct ioat_dma_descriptor *hw;
	dma_cookie_t cookie;
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	unsigned long orig_flags;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_flags = first->async_tx.flags;
	new = first;

	/*
	 * ioat_chan->desc_lock is still in force in version 2 path
	 * it gets unlocked at end of this function
	 */
	do {
		copy = min_t(size_t, len, ioat_chan->xfercap);

		async_tx_ack(&new->async_tx);

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		desc_count++;
	} while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));

	if (!new) {
		dev_err(&ioat_chan->device->pdev->dev,
			"tx submit failed\n");
		spin_unlock_bh(&ioat_chan->desc_lock);
		return -ENOMEM;
	}

	hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	if (new->async_tx.callback) {
		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
		if (first != new) {
			/* move callback to the last desc */
			new->async_tx.callback = first->async_tx.callback;
			new->async_tx.callback_param
					= first->async_tx.callback_param;
			first->async_tx.callback = NULL;
			first->async_tx.callback_param = NULL;
		}
	}

	new->tx_cnt = desc_count;
	new->async_tx.flags = orig_flags; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	ioat_chan->dmacount += desc_count;
	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= ioat_pending_level)
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat_chan: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
					struct ioat_dma_chan *ioat_chan,
					gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		desc_sw->async_tx.tx_submit = ioat2_tx_submit;
		break;
	}
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);

	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "initial descriptors per channel (default: 256)");

/**
 * ioat2_dma_massage_chan_desc - link the descriptors into a circle
 * @ioat_chan: the channel to be massaged
 */
static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc, *_desc;

	/* setup used_desc */
	ioat_chan->used_desc.next = ioat_chan->free_desc.next;
	ioat_chan->used_desc.prev = NULL;

	/* pull free_desc out of the circle so that every node is a hw
	 * descriptor, but leave it pointing to the list
	 */
	ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
	ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;

	/* circle link the hw descriptors */
	desc = to_ioat_desc(ioat_chan->free_desc.next);
	desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
	list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
		desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
	}
}

/**
 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
					 struct dma_client *client)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return ioat_chan->desccount;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(&ioat_chan->device->pdev->dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	ioat_chan->desccount = i;
	list_splice(&tmp_list, &ioat_chan->free_desc);
	if (ioat_chan->device->version != IOAT_VER_1_2)
		ioat2_dma_massage_chan_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&ioat_chan->cleanup_task);
	ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
	return ioat_chan->desccount;
}

/**
 * ioat_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	tasklet_disable(&ioat_chan->cleanup_task);
	ioat_dma_memcpy_cleanup(ioat_chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base
	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat_chan->desc_lock);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->used_desc, node) {
			in_use_descs++;
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->free_desc, node) {
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		list_for_each_entry_safe(desc, _desc,
					 ioat_chan->free_desc.next, node) {
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		desc = to_ioat_desc(ioat_chan->free_desc.next);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		break;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(&ioat_chan->device->pdev->dev,
			"Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
	ioat_chan->pending = 0;
	ioat_chan->dmacount = 0;
	ioat_chan->watchdog_completion = 0;
	ioat_chan->last_compl_desc_addr_hw = 0;
	ioat_chan->watchdog_tcp_cookie =
		ioat_chan->watchdog_last_tcp_cookie = 0;
}

/**
 * ioat_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat_chan->free_desc)) {
		new = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
		if (!new) {
			dev_err(&ioat_chan->device->pdev->dev,
				"alloc failed\n");
			return NULL;
		}
	}

	prefetch(new->hw);
	return new;
}

static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new;

	/*
	 * used.prev points to where to start processing
	 * used.next points to next free descriptor
	 * if used.prev == NULL, there are none waiting to be processed
	 * if used.next == used.prev.prev, there is only one free descriptor,
	 * and we need to use it as a noop descriptor before
	 * linking in a new set of descriptors, since the device
	 * has probably already read the pointer to it
	 */
	if (ioat_chan->used_desc.prev &&
	    ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {

		struct ioat_desc_sw *desc;
		struct ioat_desc_sw *noop_desc;
		int i;

		/* set up the noop descriptor */
		noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
		/* set size to non-zero value (channel returns error when size is 0) */
		noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
		noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
		noop_desc->hw->src_addr = 0;
		noop_desc->hw->dst_addr = 0;

		ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
		ioat_chan->pending++;
		ioat_chan->dmacount++;

		/* try to get a few more descriptors */
		for (i = 16; i; i--) {
			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			if (!desc) {
				dev_err(&ioat_chan->device->pdev->dev,
					"alloc failed\n");
				break;
			}
			list_add_tail(&desc->node, ioat_chan->used_desc.next);

			desc->hw->next
				= to_ioat_desc(desc->node.next)->async_tx.phys;
			to_ioat_desc(desc->node.prev)->hw->next
				= desc->async_tx.phys;
			ioat_chan->desccount++;
		}

		ioat_chan->used_desc.next = noop_desc->node.next;
	}
	new = to_ioat_desc(ioat_chan->used_desc.next);
	prefetch(new);
	ioat_chan->used_desc.next = new->node.next;

	if (ioat_chan->used_desc.prev == NULL)
		ioat_chan->used_desc.prev = &new->node;

	prefetch(new->hw);
	return new;
}

static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
						struct ioat_dma_chan *ioat_chan)
{
	if (!ioat_chan)
		return NULL;

	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		return ioat1_dma_get_next_descriptor(ioat_chan);
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		return ioat2_dma_get_next_descriptor(ioat_chan);
	}
	return NULL;
}

static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
						struct dma_chan *chan,
						dma_addr_t dma_dest,
						dma_addr_t dma_src,
						size_t len,
						unsigned long flags)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat_dma_get_next_descriptor(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (new) {
		new->len = len;
		new->dst = dma_dest;
		new->src = dma_src;
		new->async_tx.flags = flags;
		return &new->async_tx;
	} else {
		dev_err(&ioat_chan->device->pdev->dev,
			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
		return NULL;
	}
}

static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
						struct dma_chan *chan,
						dma_addr_t dma_dest,
						dma_addr_t dma_src,
						size_t len,
						unsigned long flags)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat2_dma_get_next_descriptor(ioat_chan);

	/*
	 * leave ioat_chan->desc_lock set in ioat 2 path
	 * it will get unlocked at end of tx_submit
	 */

	if (new) {
		new->len = len;
		new->dst = dma_dest;
		new->src = dma_src;
		new->async_tx.flags = flags;
		return &new->async_tx;
	} else {
		spin_unlock_bh(&ioat_chan->desc_lock);
		dev_err(&ioat_chan->device->pdev->dev,
			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
		return NULL;
	}
}

static void ioat_dma_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;
	ioat_dma_memcpy_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

static void
ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
{
	/*
	 * yes we are unmapping both _page and _single
	 * alloc'd regions with unmap_page. Is this
	 * *really* that bad?
	 */
	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
		pci_unmap_page(ioat_chan->device->pdev,
			       pci_unmap_addr(desc, dst),
			       pci_unmap_len(desc, len),
			       PCI_DMA_FROMDEVICE);

	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
		pci_unmap_page(ioat_chan->device->pdev,
			       pci_unmap_addr(desc, src),
			       pci_unmap_len(desc, len),
			       PCI_DMA_TODEVICE);
}

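/*
 * The hardware writes the bus address of the last descriptor it completed
 * into the per-channel completion writeback area; cleanup walks the
 * software chain up to that address, firing callbacks, unmapping buffers
 * and recycling descriptors as it goes.
 */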
/**
 * ioat_dma_memcpy_cleanup - clean up finished descriptors
 * @chan: ioat channel to be cleaned up
 */
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	unsigned long desc_phys;
	struct ioat_desc_sw *latest_desc;

	prefetch(ioat_chan->completion_virt);

	if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations.
	   The descriptor physical addresses are limited to 32-bits
	   when the CPU can only do a 32-bit mov. */

#if (BITS_PER_LONG == 64)
	phys_complete =
		ioat_chan->completion_virt->full
		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete =
		ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((ioat_chan->completion_virt->full
		& IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
				IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(&ioat_chan->device->pdev->dev,
			"Channel halted, chanerr = %x\n",
			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == ioat_chan->last_completion) {
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		/*
		 * perhaps we're stuck so hard that the watchdog can't go off?
		 * try to catch it after 2 seconds
		 */
		if (ioat_chan->device->version != IOAT_VER_3_0) {
			if (time_after(jiffies,
				       ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
				ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
				ioat_chan->last_completion_time = jiffies;
			}
		}
		return;
	}
	ioat_chan->last_completion_time = jiffies;

	cookie = 0;
	if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->used_desc, node) {

			/*
			 * Incoming DMA requests may use multiple descriptors,
			 * due to exceeding xfercap, perhaps. If so, only the
			 * last one will have a cookie, and require unmapping.
			 */
			if (desc->async_tx.cookie) {
				cookie = desc->async_tx.cookie;
				ioat_dma_unmap(ioat_chan, desc);
				if (desc->async_tx.callback) {
					desc->async_tx.callback(desc->async_tx.callback_param);
					desc->async_tx.callback = NULL;
				}
			}

			if (desc->async_tx.phys != phys_complete) {
				/*
				 * a completed entry, but not the last, so clean
				 * up if the client is done with the descriptor
				 */
				if (async_tx_test_ack(&desc->async_tx)) {
					list_del(&desc->node);
					list_add_tail(&desc->node,
						      &ioat_chan->free_desc);
				} else
					desc->async_tx.cookie = 0;
			} else {
				/*
				 * last used desc. Do not remove, so we can
				 * append from it, but don't look at it next
				 * time, either
				 */
				desc->async_tx.cookie = 0;

				/* TODO check status bits? */
				break;
			}
		}
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		/* has some other thread already cleaned up? */
		if (ioat_chan->used_desc.prev == NULL)
			break;

		/* work backwards to find latest finished desc */
		desc = to_ioat_desc(ioat_chan->used_desc.next);
		latest_desc = NULL;
		do {
			desc = to_ioat_desc(desc->node.prev);
			desc_phys = (unsigned long)desc->async_tx.phys
				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
			if (desc_phys == phys_complete) {
				latest_desc = desc;
				break;
			}
		} while (&desc->node != ioat_chan->used_desc.prev);

		if (latest_desc != NULL) {

			/* work forwards to clear finished descriptors */
			for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
			     &desc->node != latest_desc->node.next &&
			     &desc->node != ioat_chan->used_desc.next;
			     desc = to_ioat_desc(desc->node.next)) {
				if (desc->async_tx.cookie) {
					cookie = desc->async_tx.cookie;
					desc->async_tx.cookie = 0;
					ioat_dma_unmap(ioat_chan, desc);
					if (desc->async_tx.callback) {
						desc->async_tx.callback(desc->async_tx.callback_param);
						desc->async_tx.callback = NULL;
					}
				}
			}

			/* move used.prev up beyond those that are finished */
			if (&desc->node == ioat_chan->used_desc.next)
				ioat_chan->used_desc.prev = NULL;
			else
				ioat_chan->used_desc.prev = &desc->node;
		}
		break;
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	ioat_chan->last_completion = phys_complete;
	if (cookie != 0)
		ioat_chan->completed_cookie = cookie;

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;
	ioat_chan->watchdog_tcp_cookie = cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	desc = ioat_dma_get_next_descriptor(ioat_chan);

	if (!desc) {
		dev_err(&ioat_chan->device->pdev->dev,
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat_chan->desc_lock);
		return;
	}

	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
				| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
				| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	/* set size to non-zero value (channel returns error when size is 0) */
	desc->hw->size = NULL_DESC_BUFFER_SIZE;
	desc->hw->src_addr = 0;
	desc->hw->dst_addr = 0;
	async_tx_ack(&desc->async_tx);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		desc->hw->next = 0;
		list_add_tail(&desc->node, &ioat_chan->used_desc);

		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

		ioat_chan->dmacount++;
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		break;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
		dma_async_param);
}

Shannon Nelson3e037452007-10-16 01:27:40 -07001340/**
1341 * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
1342 * @device: device to be tested
1343 */
1344static int ioat_dma_self_test(struct ioatdma_device *device)
Chris Leech0bbd5f42006-05-23 17:35:34 -07001345{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
		dev_err(&device->pdev->dev,
			"selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
				 DMA_TO_DEVICE);
	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
				  DMA_FROM_DEVICE);
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, 0);
	if (!tx) {
		dev_err(&device->pdev->dev,
			"Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = (void *)0x8086; /* arbitrary marker: Intel's PCI vendor ID */
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(&device->pdev->dev,
			"Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	device->common.device_issue_pending(dma_chan);
	msleep(1);

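	/*
	 * IOAT_TEST_SIZE (2000 bytes) should complete well within the
	 * 1ms sleep above, so a single completion check is treated as
	 * pass/fail rather than polling in a loop.
	 */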
	if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(&device->pdev->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(&device->pdev->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	device->common.device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 *
 * Attempts the style requested via the ioat_interrupt_style module
 * parameter, then falls back to progressively simpler modes
 * (MSI-X -> MSI -> INTx) before giving up.
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int err, i, j, msixcnt;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
		ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

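	/*
	 * pci_enable_msix() returns 0 on success, a negative errno on
	 * failure, or a positive count of vectors available when fewer
	 * than msixcnt could be allocated - hence the two fallbacks.
	 */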
	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);
		err = request_irq(device->msix_entries[i].vector,
				  ioat_dma_do_interrupt_msix,
				  0, "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				ioat_chan =
					ioat_lookup_chan_by_index(device, j);
				free_irq(device->msix_entries[j].vector,
					 ioat_chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = msix_multi_vector;
	goto done;

msix_single_vector:
	device->msix_entries[0].entry = 0;
	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
			  0, "ioat-msix", device);
	if (err) {
		pci_disable_msix(device->pdev);
		goto msi;
	}
	device->irq_mode = msix_single_vector;
	goto done;

msi:
	err = pci_enable_msi(device->pdev);
	if (err)
		goto intx;

	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  0, "ioat-msi", device);
	if (err) {
		pci_disable_msi(device->pdev);
		goto intx;
	}
	/*
	 * CB 1.2 devices need a bit set in configuration space to enable MSI
	 */
	if (device->version == IOAT_VER_1_2) {
		u32 dmactrl;
		pci_read_config_dword(device->pdev,
				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
		pci_write_config_dword(device->pdev,
				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
	}
	device->irq_mode = msi;
	goto done;

intx:
	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;
	device->irq_mode = intx;

done:
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(&device->pdev->dev, "no usable interrupts\n");
	device->irq_mode = none;
	return -1;
}

/**
 * ioat_dma_remove_interrupts - remove whatever interrupts were set
 * @device: ioat device
 */
static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int i;

	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);

	switch (device->irq_mode) {
	case msix_multi_vector:
		for (i = 0; i < device->common.chancnt; i++) {
			ioat_chan = ioat_lookup_chan_by_index(device, i);
			free_irq(device->msix_entries[i].vector, ioat_chan);
		}
		pci_disable_msix(device->pdev);
		break;
	case msix_single_vector:
		free_irq(device->msix_entries[0].vector, device);
		pci_disable_msix(device->pdev);
		break;
	case msi:
		free_irq(device->pdev->irq, device);
		pci_disable_msi(device->pdev);
		break;
	case intx:
		free_irq(device->pdev->irq, device);
		break;
	case none:
		dev_warn(&device->pdev->dev,
			 "call to %s without interrupts setup\n", __func__);
	}
	device->irq_mode = none;
}

struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
				      void __iomem *iobase)
{
	int err;
	struct ioatdma_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pdev;
	device->reg_base = iobase;
	device->version = readb(device->reg_base + IOAT_VER_OFFSET);

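	/*
	 * Descriptors are allocated 64-byte aligned below; that matches
	 * the hardware descriptor size, so each one sits in its own
	 * cacheline and satisfies the chain-address alignment.
	 */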
	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	INIT_LIST_HEAD(&device->common.channels);
	ioat_dma_enumerate_channels(device);

	device->common.device_alloc_chan_resources =
					ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
					ioat_dma_free_chan_resources;
	device->common.dev = &pdev->dev;

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_is_tx_complete = ioat_dma_is_complete;
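	/*
	 * CB 1.2 chains descriptors through hw->next, while 2.0/3.0
	 * hardware works from a ring driven by the dmacount register,
	 * so prep and issue_pending get version-specific ops.
	 */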
	switch (device->version) {
	case IOAT_VER_1_2:
		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
		device->common.device_issue_pending =
						ioat1_dma_memcpy_issue_pending;
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
		device->common.device_issue_pending =
						ioat2_dma_memcpy_issue_pending;
		break;
	}

	dev_err(&device->pdev->dev,
		"Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		device->common.chancnt, device->version, IOAT_DMA_VERSION);

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	ioat_set_tcp_copy_break(device);

	dma_async_device_register(&device->common);

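	/*
	 * The watchdog periodically scans for hung channels and resets
	 * them; version 3.0 hardware is not known to need it, so it is
	 * only armed for older devices.
	 */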
	if (device->version != IOAT_VER_3_0) {
		INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
		schedule_delayed_work(&device->work,
				      WATCHDOG_DELAY);
	}

	return device;

err_self_test:
	ioat_dma_remove_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	dev_err(&pdev->dev,
		"Intel(R) I/OAT DMA Engine initialization failed\n");
	return NULL;
}

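/**
 * ioat_dma_remove - shut down an ioatdma device and free its resources
 * @device: the device to tear down
 *
 * Interrupts are removed first so no handler can run while the device
 * is unregistered and its channels are freed.
 */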
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	ioat_dma_remove_interrupts(device);

	dma_async_device_unregister(&device->common);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	iounmap(device->reg_base);
	pci_release_regions(device->pdev);
	pci_disable_device(device->pdev);

	if (device->version != IOAT_VER_3_0)
		cancel_delayed_work(&device->work);

	list_for_each_entry_safe(chan, _chan,
				 &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}
