/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
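
/*
 * Client usage sketch (illustrative only, not part of the driver): with
 * the operations this file registers, a dmaengine client that already
 * holds a channel would drive a copy roughly as below, mirroring
 * ioat_self_test() further down.  The buffer names and the length are
 * hypothetical:
 *
 *      struct dma_async_tx_descriptor *tx;
 *      dma_addr_t src_dma, dst_dma;
 *      dma_cookie_t cookie;
 *
 *      tx = chan->device->device_prep_dma_memcpy(chan, len, 0);
 *      src_dma = dma_map_single(chan->device->dev, src, len, DMA_TO_DEVICE);
 *      tx->tx_set_src(src_dma, tx, 0);
 *      dst_dma = dma_map_single(chan->device->dev, dst, len, DMA_FROM_DEVICE);
 *      tx->tx_set_dest(dst_dma, tx, 0);
 *      cookie = tx->tx_submit(tx);
 *      chan->device->device_issue_pending(chan);
 *
 * Completion is then polled via chan->device->device_is_tx_complete()
 * using the returned cookie.
 */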

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define INITIAL_IOAT_DESC_COUNT 128

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

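/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device whose channels are enumerated
 *
 * Reads the channel count and transfer capability from the device
 * registers, then allocates one ioat_dma_chan per hardware channel.
 * Each channel's registers live in a 0x80-byte block starting 0x80
 * bytes past the device register base.
 */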
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
        u8 xfercap_scale;
        u32 xfercap;
        int i;
        struct ioat_dma_chan *ioat_chan;

        device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
        xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
        xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

        for (i = 0; i < device->common.chancnt; i++) {
                ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
                if (!ioat_chan) {
                        device->common.chancnt = i;
                        break;
                }

                ioat_chan->device = device;
                ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
                ioat_chan->xfercap = xfercap;
                spin_lock_init(&ioat_chan->cleanup_lock);
                spin_lock_init(&ioat_chan->desc_lock);
                INIT_LIST_HEAD(&ioat_chan->free_desc);
                INIT_LIST_HEAD(&ioat_chan->used_desc);
                /* This should be made common somewhere in dmaengine.c */
                ioat_chan->common.device = &device->common;
                list_add_tail(&ioat_chan->common.device_node,
                              &device->common.channels);
        }
        return device->common.chancnt;
}

static void ioat_set_src(dma_addr_t addr,
                         struct dma_async_tx_descriptor *tx,
                         int index)
{
        struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

        pci_unmap_addr_set(desc, src, addr);

        list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
                iter->hw->src_addr = addr;
                addr += ioat_chan->xfercap;
        }
}

static void ioat_set_dest(dma_addr_t addr,
                          struct dma_async_tx_descriptor *tx,
                          int index)
{
        struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

        pci_unmap_addr_set(desc, dst, addr);

        list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
                iter->hw->dst_addr = addr;
                addr += ioat_chan->xfercap;
        }
}

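/**
 * ioat_tx_submit - attach a prepared transaction to the hardware chain
 * @tx: descriptor group to be submitted
 *
 * Assigns the next cookie under desc_lock, links the group onto the
 * channel's used list, and writes the group's physical address into
 * the NextDescriptor field of the previous chain tail.  The APPEND
 * doorbell is only rung once at least four descriptors are pending,
 * batching the MMIO writes.
 */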
static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
        struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
        int append = 0;
        dma_cookie_t cookie;
        struct ioat_desc_sw *group_start;

        group_start = list_entry(desc->async_tx.tx_list.next,
                                 struct ioat_desc_sw, node);
        spin_lock_bh(&ioat_chan->desc_lock);
        /* cookie incr and addition to used_list must be atomic */
        cookie = ioat_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
        ioat_chan->common.cookie = desc->async_tx.cookie = cookie;

        /* write address into NextDescriptor field of last desc in chain */
        to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
                                                group_start->async_tx.phys;
        list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev);

        ioat_chan->pending += desc->tx_cnt;
        if (ioat_chan->pending >= 4) {
                append = 1;
                ioat_chan->pending = 0;
        }
        spin_unlock_bh(&ioat_chan->desc_lock);

        if (append)
                writeb(IOAT_CHANCMD_APPEND,
                       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

        return cookie;
}

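/**
 * ioat_dma_alloc_descriptor - allocate one hw + sw descriptor pair
 * @ioat_chan: the channel the descriptor will belong to
 * @flags: allocation flags (GFP_KERNEL or GFP_ATOMIC)
 *
 * The hardware descriptor comes from the device's DMA-coherent pool;
 * the software wrapper tracking it is a plain kzalloc.  If either
 * allocation fails, both are released and %NULL is returned.
 */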
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
                                        struct ioat_dma_chan *ioat_chan,
                                        gfp_t flags)
{
        struct ioat_dma_descriptor *desc;
        struct ioat_desc_sw *desc_sw;
        struct ioatdma_device *ioatdma_device;
        dma_addr_t phys;

        ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
        desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
        if (unlikely(!desc))
                return NULL;

        desc_sw = kzalloc(sizeof(*desc_sw), flags);
        if (unlikely(!desc_sw)) {
                pci_pool_free(ioatdma_device->dma_pool, desc, phys);
                return NULL;
        }

        memset(desc, 0, sizeof(*desc));
        dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
        desc_sw->async_tx.tx_set_src = ioat_set_src;
        desc_sw->async_tx.tx_set_dest = ioat_set_dest;
        desc_sw->async_tx.tx_submit = ioat_tx_submit;
        INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
        desc_sw->hw = desc;
        desc_sw->async_tx.phys = phys;

        return desc_sw;
}

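/**
 * ioat_dma_alloc_chan_resources - pre-allocate descriptors for a channel
 * @chan: the channel to be filled out
 *
 * Returns the actual number of allocated descriptors.  Also clears any
 * stale channel error state, allocates the completion writeback area,
 * and starts the channel on a NULL descriptor.
 */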
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *desc = NULL;
        u16 chanctrl;
        u32 chanerr;
        int i;
        LIST_HEAD(tmp_list);

        /* have we already been set up? */
        if (!list_empty(&ioat_chan->free_desc))
                return INITIAL_IOAT_DESC_COUNT;

        /* Set up register to interrupt and write completion status on error */
        chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
                IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
                IOAT_CHANCTRL_ERR_COMPLETION_EN;
        writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        if (chanerr) {
                dev_err(&ioat_chan->device->pdev->dev,
                        "ioatdma: CHANERR = %x, clearing\n", chanerr);
                writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        }

        /* Allocate descriptors */
        for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
                desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
                if (!desc) {
                        dev_err(&ioat_chan->device->pdev->dev,
                                "ioatdma: Only %d initial descriptors\n", i);
                        break;
                }
                list_add_tail(&desc->node, &tmp_list);
        }
        spin_lock_bh(&ioat_chan->desc_lock);
        list_splice(&tmp_list, &ioat_chan->free_desc);
        spin_unlock_bh(&ioat_chan->desc_lock);

        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        ioat_chan->completion_virt =
                pci_pool_alloc(ioat_chan->device->completion_pool,
                               GFP_KERNEL,
                               &ioat_chan->completion_addr);
        memset(ioat_chan->completion_virt, 0,
               sizeof(*ioat_chan->completion_virt));
        writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64) ioat_chan->completion_addr) >> 32,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        ioat_dma_start_null_desc(ioat_chan);
        return i;
}
| 240 | |
Chris Leech | 0bbd5f4 | 2006-05-23 17:35:34 -0700 | [diff] [blame] | 241 | static void ioat_dma_free_chan_resources(struct dma_chan *chan) |
| 242 | { |
| 243 | struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); |
Shannon Nelson | 8ab8956 | 2007-10-16 01:27:39 -0700 | [diff] [blame^] | 244 | struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device); |
Chris Leech | 0bbd5f4 | 2006-05-23 17:35:34 -0700 | [diff] [blame] | 245 | struct ioat_desc_sw *desc, *_desc; |
Chris Leech | 0bbd5f4 | 2006-05-23 17:35:34 -0700 | [diff] [blame] | 246 | int in_use_descs = 0; |
| 247 | |
| 248 | ioat_dma_memcpy_cleanup(ioat_chan); |
| 249 | |
Chris Leech | e382881 | 2007-03-08 09:57:35 -0800 | [diff] [blame] | 250 | writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET); |
Chris Leech | 0bbd5f4 | 2006-05-23 17:35:34 -0700 | [diff] [blame] | 251 | |
| 252 | spin_lock_bh(&ioat_chan->desc_lock); |
| 253 | list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) { |
| 254 | in_use_descs++; |
| 255 | list_del(&desc->node); |
Shannon Nelson | 8ab8956 | 2007-10-16 01:27:39 -0700 | [diff] [blame^] | 256 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, |
Dan Williams | 7405f74 | 2007-01-02 11:10:43 -0700 | [diff] [blame] | 257 | desc->async_tx.phys); |
Chris Leech | 0bbd5f4 | 2006-05-23 17:35:34 -0700 | [diff] [blame] | 258 | kfree(desc); |
| 259 | } |
| 260 | list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) { |
| 261 | list_del(&desc->node); |
Shannon Nelson | 8ab8956 | 2007-10-16 01:27:39 -0700 | [diff] [blame^] | 262 | pci_pool_free(ioatdma_device->dma_pool, desc->hw, |
Dan Williams | 7405f74 | 2007-01-02 11:10:43 -0700 | [diff] [blame] | 263 | desc->async_tx.phys); |
Chris Leech | 0bbd5f4 | 2006-05-23 17:35:34 -0700 | [diff] [blame] | 264 | kfree(desc); |
| 265 | } |
| 266 | spin_unlock_bh(&ioat_chan->desc_lock); |
| 267 | |
Shannon Nelson | 8ab8956 | 2007-10-16 01:27:39 -0700 | [diff] [blame^] | 268 | pci_pool_free(ioatdma_device->completion_pool, |
Shannon Nelson | 43d6e36 | 2007-10-16 01:27:39 -0700 | [diff] [blame] | 269 | ioat_chan->completion_virt, |
| 270 | ioat_chan->completion_addr); |
Chris Leech | 0bbd5f4 | 2006-05-23 17:35:34 -0700 | [diff] [blame] | 271 | |
| 272 | /* one is ok since we left it on there on purpose */ |
| 273 | if (in_use_descs > 1) |
Shannon Nelson | 43d6e36 | 2007-10-16 01:27:39 -0700 | [diff] [blame] | 274 | dev_err(&ioat_chan->device->pdev->dev, |
| 275 | "ioatdma: Freeing %d in use descriptors!\n", |
Chris Leech | 0bbd5f4 | 2006-05-23 17:35:34 -0700 | [diff] [blame] | 276 | in_use_descs - 1); |
| 277 | |
| 278 | ioat_chan->last_completion = ioat_chan->completion_addr = 0; |
Chris Leech | 0bbd5f4 | 2006-05-23 17:35:34 -0700 | [diff] [blame] | 279 | } |
| 280 | |
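/**
 * ioat_dma_prep_memcpy - build a descriptor chain for a copy of @len bytes
 * @chan: channel to prepare the transaction on
 * @len: total number of bytes to copy
 * @int_en: ignored here; interrupt-on-completion flag in the dmaengine API
 *
 * A copy larger than the channel's transfer capability is split across
 * several hardware descriptors.  For example, with a 4096-byte xfercap,
 * a 10000-byte copy becomes three descriptors of 4096, 4096 and 1808
 * bytes; ioat_set_src()/ioat_set_dest() later walk the same list and
 * advance the addresses by xfercap per descriptor.  Only the last
 * descriptor carries the cookie and requests completion status writeback.
 */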
static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
                                                struct dma_chan *chan,
                                                size_t len,
                                                int int_en)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *first, *prev, *new;
        LIST_HEAD(new_chain);
        u32 copy;
        size_t orig_len;
        int desc_count = 0;

        if (!len)
                return NULL;

        orig_len = len;

        first = NULL;
        prev = NULL;

        spin_lock_bh(&ioat_chan->desc_lock);
        while (len) {
                if (!list_empty(&ioat_chan->free_desc)) {
                        new = to_ioat_desc(ioat_chan->free_desc.next);
                        list_del(&new->node);
                } else {
                        /* try to get another desc */
                        new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
                        /* will this ever happen? */
                        /* TODO add upper limit on these */
                        BUG_ON(!new);
                }

                copy = min((u32) len, ioat_chan->xfercap);

                new->hw->size = copy;
                new->hw->ctl = 0;
                new->async_tx.cookie = 0;
                new->async_tx.ack = 1;

                /* chain together the physical address list for the HW */
                if (!first)
                        first = new;
                else
                        prev->hw->next = (u64) new->async_tx.phys;

                prev = new;
                len -= copy;
                list_add_tail(&new->node, &new_chain);
                desc_count++;
        }

        list_splice(&new_chain, &new->async_tx.tx_list);

        new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
        new->hw->next = 0;
        new->tx_cnt = desc_count;
        new->async_tx.ack = 0; /* client is in control of this ack */
        new->async_tx.cookie = -EBUSY;

        pci_unmap_len_set(new, len, orig_len);
        spin_unlock_bh(&ioat_chan->desc_lock);

        return new ? &new->async_tx : NULL;
}

/**
 * ioat_dma_memcpy_issue_pending - push pending appended descriptors to hw
 * @chan: DMA channel handle
 */
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

        if (ioat_chan->pending != 0) {
                ioat_chan->pending = 0;
                writeb(IOAT_CHANCMD_APPEND,
                       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
        }
}

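/**
 * ioat_dma_memcpy_cleanup - reap completed descriptors on a channel
 * @ioat_chan: channel to be cleaned up
 *
 * Reads the physical address of the last completed descriptor from the
 * completion writeback area, unmaps the buffers of every finished
 * transaction, and returns acked descriptors to the free list.  Uses a
 * trylock so concurrent callers simply skip the pass.
 */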
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
        unsigned long phys_complete;
        struct ioat_desc_sw *desc, *_desc;
        dma_cookie_t cookie = 0;

        prefetch(ioat_chan->completion_virt);

        if (!spin_trylock(&ioat_chan->cleanup_lock))
                return;

        /*
         * The completion writeback can happen at any time, so reads by
         * the driver need to be atomic operations.  The descriptor
         * physical addresses are limited to 32 bits when the CPU can
         * only do a 32-bit mov.
         */
#if (BITS_PER_LONG == 64)
        phys_complete = ioat_chan->completion_virt->full
                & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
        phys_complete = ioat_chan->completion_virt->low
                & IOAT_LOW_COMPLETION_MASK;
#endif

        if ((ioat_chan->completion_virt->full
                & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
                IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
                dev_err(&ioat_chan->device->pdev->dev,
                        "ioatdma: Channel halted, chanerr = %x\n",
                        readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

                /* TODO do something to salvage the situation */
        }

        if (phys_complete == ioat_chan->last_completion) {
                spin_unlock(&ioat_chan->cleanup_lock);
                return;
        }

        spin_lock_bh(&ioat_chan->desc_lock);
        list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {

                /*
                 * Incoming DMA requests may use multiple descriptors, due to
                 * exceeding xfercap, perhaps. If so, only the last one will
                 * have a cookie, and require unmapping.
                 */
                if (desc->async_tx.cookie) {
                        cookie = desc->async_tx.cookie;

                        /*
                         * yes we are unmapping both _page and _single alloc'd
                         * regions with unmap_page. Is this *really* that bad?
                         */
                        pci_unmap_page(ioat_chan->device->pdev,
                                       pci_unmap_addr(desc, dst),
                                       pci_unmap_len(desc, len),
                                       PCI_DMA_FROMDEVICE);
                        pci_unmap_page(ioat_chan->device->pdev,
                                       pci_unmap_addr(desc, src),
                                       pci_unmap_len(desc, len),
                                       PCI_DMA_TODEVICE);
                }

                if (desc->async_tx.phys != phys_complete) {
                        /*
                         * a completed entry, but not the last, so cleanup
                         * if the client is done with the descriptor
                         */
                        if (desc->async_tx.ack) {
                                list_del(&desc->node);
                                list_add_tail(&desc->node,
                                              &ioat_chan->free_desc);
                        } else
                                desc->async_tx.cookie = 0;
                } else {
                        /*
                         * last used desc. Do not remove, so we can append from
                         * it, but don't look at it next time, either
                         */
                        desc->async_tx.cookie = 0;

                        /* TODO check status bits? */
                        break;
                }
        }

        spin_unlock_bh(&ioat_chan->desc_lock);

        ioat_chan->last_completion = phys_complete;
        if (cookie != 0)
                ioat_chan->completed_cookie = cookie;

        spin_unlock(&ioat_chan->cleanup_lock);
}

static void ioat_dma_dependency_added(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

        spin_lock_bh(&ioat_chan->desc_lock);
        if (ioat_chan->pending == 0) {
                spin_unlock_bh(&ioat_chan->desc_lock);
                ioat_dma_memcpy_cleanup(ioat_chan);
        } else
                spin_unlock_bh(&ioat_chan->desc_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
                                            dma_cookie_t cookie,
                                            dma_cookie_t *done,
                                            dma_cookie_t *used)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status ret;

        last_used = chan->cookie;
        last_complete = ioat_chan->completed_cookie;

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret == DMA_SUCCESS)
                return ret;

        ioat_dma_memcpy_cleanup(ioat_chan);

        last_used = chan->cookie;
        last_complete = ioat_chan->completed_cookie;

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        return dma_async_is_complete(cookie, last_complete, last_used);
}

/* PCI API */

static irqreturn_t ioat_do_interrupt(int irq, void *data)
{
        struct ioatdma_device *instance = data;
        unsigned long attnstatus;
        u8 intrctrl;

        intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

        if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
                return IRQ_NONE;

        if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
                writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
                return IRQ_NONE;
        }

        attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);

        printk(KERN_ERR "ioatdma: interrupt! status %lx\n", attnstatus);

        writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
        return IRQ_HANDLED;
}

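/**
 * ioat_dma_start_null_desc - kick off a channel with a NULL descriptor
 * @ioat_chan: channel to be started
 *
 * Writes a do-nothing (NUL) descriptor's physical address into the
 * chain-address registers and issues START, so there is always a chain
 * tail for ioat_tx_submit() to append real work to.
 */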
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *desc;

        spin_lock_bh(&ioat_chan->desc_lock);

        if (!list_empty(&ioat_chan->free_desc)) {
                desc = to_ioat_desc(ioat_chan->free_desc.next);
                list_del(&desc->node);
        } else {
                /* try to get another desc */
                spin_unlock_bh(&ioat_chan->desc_lock);
                desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
                spin_lock_bh(&ioat_chan->desc_lock);
                /* will this ever happen? */
                BUG_ON(!desc);
        }

        desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
        desc->hw->next = 0;
        desc->async_tx.ack = 1;

        list_add_tail(&desc->node, &ioat_chan->used_desc);
        spin_unlock_bh(&ioat_chan->desc_lock);

        writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
               ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
        writel(((u64) desc->async_tx.phys) >> 32,
               ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);

        writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

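/**
 * ioat_self_test - copy a known pattern through the first channel
 * @device: device to be tested
 *
 * Allocates two buffers, runs a full prepare/submit/poll cycle on the
 * first channel, and compares the result.  Returns -ENODEV if the copy
 * times out or miscompares, so probe can bail out before registering.
 */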
static int ioat_self_test(struct ioatdma_device *device)
{
        int i;
        u8 *src;
        u8 *dest;
        struct dma_chan *dma_chan;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t addr;
        dma_cookie_t cookie;
        int err = 0;

        src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;
        dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < IOAT_TEST_SIZE; i++)
                src[i] = (u8)i;

        /* Start copy, using first DMA channel */
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
        if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
                dev_err(&device->pdev->dev,
                        "selftest cannot allocate chan resource\n");
                err = -ENODEV;
                goto out;
        }

        tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
        async_tx_ack(tx);
        addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
                              DMA_TO_DEVICE);
        ioat_set_src(addr, tx, 0);
        addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
                              DMA_FROM_DEVICE);
        ioat_set_dest(addr, tx, 0);
        cookie = ioat_tx_submit(tx);
        ioat_dma_memcpy_issue_pending(dma_chan);
        msleep(1);

        if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
                dev_err(&device->pdev->dev,
                        "ioatdma: Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }
        if (memcmp(src, dest, IOAT_TEST_SIZE)) {
                dev_err(&device->pdev->dev,
                        "ioatdma: Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

free_resources:
        ioat_dma_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}

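/**
 * ioat_dma_probe - set up an I/OAT device and register it with dmaengine
 * @pdev: the PCI device
 * @iobase: already-mapped device registers
 *
 * Creates the descriptor and completion pools, enumerates channels,
 * fills in the dma_device operations, hooks the shared interrupt, and
 * runs the self-test before registering.  Returns the new device, or
 * %NULL on failure (in which case @iobase has been unmapped).
 */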
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
                                      void __iomem *iobase)
{
        int err;
        struct ioatdma_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device) {
                err = -ENOMEM;
                goto err_kzalloc;
        }
        device->pdev = pdev;
        device->reg_base = iobase;
        device->version = readb(device->reg_base + IOAT_VER_OFFSET);

        /* DMA coherent memory pool for DMA descriptor allocations */
        device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
                                           sizeof(struct ioat_dma_descriptor),
                                           64, 0);
        if (!device->dma_pool) {
                err = -ENOMEM;
                goto err_dma_pool;
        }

        device->completion_pool = pci_pool_create("completion_pool", pdev,
                                                  sizeof(u64), SMP_CACHE_BYTES,
                                                  SMP_CACHE_BYTES);
        if (!device->completion_pool) {
                err = -ENOMEM;
                goto err_completion_pool;
        }

        INIT_LIST_HEAD(&device->common.channels);
        ioat_dma_enumerate_channels(device);

        dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
        device->common.device_alloc_chan_resources =
                                        ioat_dma_alloc_chan_resources;
        device->common.device_free_chan_resources =
                                        ioat_dma_free_chan_resources;
        device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
        device->common.device_is_tx_complete = ioat_dma_is_complete;
        device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
        device->common.device_dependency_added = ioat_dma_dependency_added;
        device->common.dev = &pdev->dev;
        printk(KERN_INFO "ioatdma: Intel(R) I/OAT DMA Engine found,"
               " %d channels, device version 0x%02x\n",
               device->common.chancnt, device->version);

        pci_set_drvdata(pdev, device);
        err = request_irq(pdev->irq, &ioat_do_interrupt, IRQF_SHARED, "ioat",
                          device);
        if (err)
                goto err_irq;

        writeb(IOAT_INTRCTRL_MASTER_INT_EN,
               device->reg_base + IOAT_INTRCTRL_OFFSET);
        pci_set_master(pdev);

        err = ioat_self_test(device);
        if (err)
                goto err_self_test;

        dma_async_device_register(&device->common);

        return device;

err_self_test:
        free_irq(device->pdev->irq, device);
err_irq:
        pci_pool_destroy(device->completion_pool);
err_completion_pool:
        pci_pool_destroy(device->dma_pool);
err_dma_pool:
        kfree(device);
err_kzalloc:
        iounmap(iobase);
        printk(KERN_ERR
               "ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");
        return NULL;
}

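/**
 * ioat_dma_remove - tear down a device set up by ioat_dma_probe
 * @device: the device to be removed
 *
 * Unregisters from dmaengine, releases the IRQ and both pools, then
 * frees every per-channel structure and the device itself.
 */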
void ioat_dma_remove(struct ioatdma_device *device)
{
        struct dma_chan *chan, *_chan;
        struct ioat_dma_chan *ioat_chan;

        dma_async_device_unregister(&device->common);

        free_irq(device->pdev->irq, device);

        pci_pool_destroy(device->dma_pool);
        pci_pool_destroy(device->completion_pool);

        list_for_each_entry_safe(chan, _chan,
                                 &device->common.channels, device_node) {
                ioat_chan = to_ioat_chan(chan);
                list_del(&chan->device_node);
                kfree(ioat_chan);
        }
        kfree(device);
}