/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>

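/* the command type is encoded in bits 28-31 of the second command dword */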
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define EXIT_LOOP_COUNT 10000000

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e);

/* returns !0 if the IOMMU is caching non-present entries in its TLB */
static int iommu_has_npcache(struct amd_iommu *iommu)
{
	return iommu->cap & IOMMU_CAP_NPCACHE;
}

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void iommu_print_event(void *__evt)
{
	u32 *event = __evt;
	int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	u64 address = (u64)(((u64)event[3]) << 32) | event[2];

	printk(KERN_ERR "AMD IOMMU: Event logged [");

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_IO_FAULT:
		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
		       "flags=0x%04x]\n", address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
		       "address=0x%016llx]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	default:
		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
	}
}

static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

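	/*
	 * The event log is a ring buffer; consume entries until the head
	 * pointer catches up with the tail written by the hardware
	 */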
	while (head != tail) {
		iommu_print_event(iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	struct amd_iommu *iommu;

	list_for_each_entry(iommu, &amd_iommu_list, list)
		iommu_poll_events(iommu);

	return IRQ_HANDLED;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command. Must be called with iommu->lock held.
 */
static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	u32 tail, head;
	u8 *target;

	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	target = iommu->cmd_buf + tail;
	memcpy_toio(target, cmd, sizeof(*cmd));
	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
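	/* the buffer is full when the new tail catches up with the head */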
	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	if (tail == head)
		return -ENOMEM;
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	return 0;
}

/*
 * General queuing function for commands. Takes iommu->lock and calls
 * __iommu_queue_command().
 */
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

/*
 * This function is called whenever we need to ensure that the IOMMU has
 * completed execution of all commands we sent. It sends a
 * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
 * us about that by writing a value to a physical address we pass with
 * the command.
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	int ret = 0, ready = 0;
	unsigned status = 0;
	struct iommu_cmd cmd;
	unsigned long flags, i = 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

	iommu->need_sync = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ret = __iommu_queue_command(iommu, &cmd);

	if (ret)
		goto out;

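	/*
	 * Poll the status register until the completion-wait interrupt
	 * bit is set or we give up after EXIT_LOOP_COUNT iterations
	 */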
	while (!ready && (i < EXIT_LOOP_COUNT)) {
		++i;
		/* wait for the bit to become one */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
	}

	/* set bit back to zero */
	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);

	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
out:
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/*
 * Command send function for invalidating a device table entry
 */
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;
	int ret;

	BUG_ON(iommu == NULL);

	memset(&cmd, 0, sizeof(cmd));
	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
	cmd.data[0] = devid;

	ret = iommu_queue_command(iommu, &cmd);

	iommu->need_sync = 1;

	return ret;
}

/*
 * Generic command send function for invalidating TLB entries
 */
static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
		u64 address, u16 domid, int pde, int s)
{
	struct iommu_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	address &= PAGE_MASK;
	CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
	cmd.data[1] |= domid;
	cmd.data[2] = lower_32_bits(address);
	cmd.data[3] = upper_32_bits(address);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;

	ret = iommu_queue_command(iommu, &cmd);

	iommu->need_sync = 1;

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
		u64 address, size_t size)
{
	int s = 0;
	unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);

	address &= PAGE_MASK;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);

	return 0;
}

/* Flush the whole IO/TLB for a given protection domain */
static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
{
	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;

	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map(struct protection_domain *dom,
		     unsigned long bus_addr,
		     unsigned long phys_addr,
		     int prot)
{
	u64 __pte, *pte, *page;

	bus_addr  = PAGE_ALIGN(bus_addr);
	phys_addr = PAGE_ALIGN(phys_addr);

	/* only support 512GB address spaces for now */
	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
		return -EINVAL;

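	/*
	 * Walk the three page table levels and allocate intermediate
	 * page table pages that are not yet present
	 */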
	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];

	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		*pte = IOMMU_L2_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];

	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		*pte = IOMMU_L1_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];

	if (IOMMU_PTE_PRESENT(*pte))
		return -EBUSY;

	__pte = phys_addr | IOMMU_PTE_P;
	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	*pte = __pte;

	return 0;
}

/*
 * This function checks if a specific unity mapping entry is needed for
 * this specific IOMMU.
 */
static int iommu_for_unity_map(struct amd_iommu *iommu,
			       struct unity_map_entry *entry)
{
	u16 bdf, i;

	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
		bdf = amd_iommu_alias_table[i];
		if (amd_iommu_rlookup_table[bdf] == iommu)
			return 1;
	}

	return 0;
}

/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default DMA domain of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
	struct unity_map_entry *entry;
	int ret;

	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		if (!iommu_for_unity_map(iommu, entry))
			continue;
		ret = dma_ops_unity_map(iommu->default_dom, entry);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * This function actually applies the mapping to the page table of the
 * dma_ops domain.
 */
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e)
{
	u64 addr;
	int ret;

	for (addr = e->address_start; addr < e->address_end;
	     addr += PAGE_SIZE) {
		ret = iommu_map(&dma_dom->domain, addr, addr, e->prot);
		if (ret)
			return ret;
		/*
		 * if unity mapping is in aperture range mark the page
		 * as allocated in the aperture
		 */
		if (addr < dma_dom->aperture_size)
			__set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap);
	}

	return 0;
}

/*
 * Inits the unity mappings required for a specific device
 */
static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
					  u16 devid)
{
	struct unity_map_entry *e;
	int ret;

	list_for_each_entry(e, &amd_iommu_unity_map, list) {
		if (!(devid >= e->devid_start && devid <= e->devid_end))
			continue;
		ret = dma_ops_unity_map(dma_dom, e);
		if (ret)
			return ret;
	}

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/

/*
 * The address allocator core function.
 *
 * called with domain->lock held
 */
static unsigned long dma_ops_alloc_addresses(struct device *dev,
					     struct dma_ops_domain *dom,
					     unsigned int pages,
					     unsigned long align_mask,
					     u64 dma_mask)
{
	unsigned long limit;
	unsigned long address;
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	limit = iommu_device_max_index(dom->aperture_size >> PAGE_SHIFT, 0,
				       dma_mask >> PAGE_SHIFT);

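	/*
	 * Wrap around to the start of the aperture; addresses may get
	 * reused, so remember to flush the domain TLB before the next
	 * mapping is handed out
	 */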
	if (dom->next_bit >= limit) {
		dom->next_bit = 0;
		dom->need_flush = true;
	}

	address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
				   0, boundary_size, align_mask);
	if (address == -1) {
		address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
					   0, boundary_size, align_mask);
		dom->need_flush = true;
	}

	if (likely(address != -1)) {
		dom->next_bit = address + pages;
		address <<= PAGE_SHIFT;
	} else
		address = bad_dma_address;

	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

	return address;
}

/*
 * The address free function.
 *
 * called with domain->lock held
 */
static void dma_ops_free_addresses(struct dma_ops_domain *dom,
				   unsigned long address,
				   unsigned int pages)
{
	address >>= PAGE_SHIFT;
	iommu_area_free(dom->bitmap, address, pages);
}

/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/

static u16 domain_id_alloc(void)
{
	unsigned long flags;
	int id;

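	/* domain id 0 is never allocated and doubles as the error value */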
	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
	BUG_ON(id == 0);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__set_bit(id, amd_iommu_pd_alloc_bitmap);
	else
		id = 0;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return id;
}

/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages)
{
	unsigned int last_page = dom->aperture_size >> PAGE_SHIFT;

	if (start_page + pages > last_page)
		pages = last_page - start_page;

	iommu_area_reserve(dom->bitmap, start_page, pages);
}

static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
{
	int i, j;
	u64 *p1, *p2, *p3;

	p1 = dma_dom->domain.pt_root;

	if (!p1)
		return;

	for (i = 0; i < 512; ++i) {
		if (!IOMMU_PTE_PRESENT(p1[i]))
			continue;

		p2 = IOMMU_PTE_PAGE(p1[i]);
		for (j = 0; j < 512; ++j) {
			if (!IOMMU_PTE_PRESENT(p2[j]))
				continue;
			p3 = IOMMU_PTE_PAGE(p2[j]);
			free_page((unsigned long)p3);
		}

		free_page((unsigned long)p2);
	}

	free_page((unsigned long)p1);
}

/*
 * Free a domain, only used if something went wrong in the
 * allocation path and we need to free an already allocated page table
 */
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
	if (!dom)
		return;

	dma_ops_free_pagetable(dom);

	kfree(dom->pte_pages);

	kfree(dom->bitmap);

	kfree(dom);
}

/*
 * Allocates a new protection domain usable for the dma_ops functions.
 * It also initializes the page table and the address allocator data
 * structures required for the dma_ops interface
 */
static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
						   unsigned order)
{
	struct dma_ops_domain *dma_dom;
	unsigned i, num_pte_pages;
	u64 *l2_pde;
	u64 address;

	/*
	 * Currently the DMA aperture must be between 32 MB and 1GB in size
	 */
	if ((order < 25) || (order > 30))
		return NULL;

	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
	if (!dma_dom)
		return NULL;

	spin_lock_init(&dma_dom->domain.lock);

	dma_dom->domain.id = domain_id_alloc();
	if (dma_dom->domain.id == 0)
		goto free_dma_dom;
	dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	dma_dom->domain.priv = dma_dom;
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;
	dma_dom->aperture_size = (1ULL << order);
	dma_dom->bitmap = kzalloc(dma_dom->aperture_size / (PAGE_SIZE * 8),
				  GFP_KERNEL);
	if (!dma_dom->bitmap)
		goto free_dma_dom;
	/*
	 * mark the first page as allocated so we never return 0 as
	 * a valid dma-address. So we can use 0 as error value
	 */
	dma_dom->bitmap[0] = 1;
	dma_dom->next_bit = 0;

	dma_dom->need_flush = false;
	dma_dom->target_dev = 0xffff;

	/* Initialize the exclusion range if necessary */
	if (iommu->exclusion_start &&
	    iommu->exclusion_start < dma_dom->aperture_size) {
		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
		int pages = iommu_num_pages(iommu->exclusion_start,
					    iommu->exclusion_length,
					    PAGE_SIZE);
		dma_ops_reserve_addresses(dma_dom, startpage, pages);
	}

	/*
	 * At the last step, build the page tables so we don't need to
	 * allocate page table pages in the dma_ops mapping/unmapping
	 * path.
	 */
	num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
	dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
				     GFP_KERNEL);
	if (!dma_dom->pte_pages)
		goto free_dma_dom;

	l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (l2_pde == NULL)
		goto free_dma_dom;

	dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));

	for (i = 0; i < num_pte_pages; ++i) {
		dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!dma_dom->pte_pages[i])
			goto free_dma_dom;
		address = virt_to_phys(dma_dom->pte_pages[i]);
		l2_pde[i] = IOMMU_L1_PDE(address);
	}

	return dma_dom;

free_dma_dom:
	dma_ops_domain_free(dma_dom);

	return NULL;
}

/*
 * Find out the protection domain structure for a given PCI device. This
 * will give us the pointer to the page table root for example.
 */
static struct protection_domain *domain_for_device(u16 devid)
{
	struct protection_domain *dom;
	unsigned long flags;

	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
	dom = amd_iommu_pd_table[devid];
	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return dom;
}

/*
 * If a device is not yet associated with a domain, this function
 * assigns it to a domain and makes the assignment visible to the
 * hardware
 */
static void set_device_domain(struct amd_iommu *iommu,
			      struct protection_domain *domain,
			      u16 devid)
{
	unsigned long flags;

	u64 pte_root = virt_to_phys(domain->pt_root);

	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
		    << DEV_ENTRY_MODE_SHIFT;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

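	/* install the device table entry under the devtable write lock */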
	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[2] = domain->id;

	amd_iommu_pd_table[devid] = domain;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	iommu_queue_inv_dev_entry(iommu, devid);

	iommu->need_sync = 1;
}

/*****************************************************************************
 *
 * The next functions belong to the dma_ops mapping/unmapping code.
 *
 *****************************************************************************/

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	if (!dev || !dev->dma_mask)
		return false;

	return true;
}

/*
 * In this function the list of preallocated protection domains is traversed to
 * find the domain for a specific device
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
	struct dma_ops_domain *entry, *ret = NULL;
	unsigned long flags;

	if (list_empty(&iommu_pd_list))
		return NULL;

	spin_lock_irqsave(&iommu_pd_list_lock, flags);

	list_for_each_entry(entry, &iommu_pd_list, list) {
		if (entry->target_dev == devid) {
			ret = entry;
			list_del(&ret->list);
			break;
		}
	}

	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

	return ret;
}

/*
 * In the dma_ops path we only have the struct device. This function
 * finds the corresponding IOMMU, the protection domain and the
 * requestor id for a given device.
 * If the device is not yet associated with a domain this is also done
 * in this function.
 */
static int get_device_resources(struct device *dev,
				struct amd_iommu **iommu,
				struct protection_domain **domain,
				u16 *bdf)
{
	struct dma_ops_domain *dma_dom;
	struct pci_dev *pcidev;
	u16 _bdf;

	*iommu = NULL;
	*domain = NULL;
	*bdf = 0xffff;

	if (dev->bus != &pci_bus_type)
		return 0;

	pcidev = to_pci_dev(dev);
	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

	/* device not translated by any IOMMU in the system? */
	if (_bdf > amd_iommu_last_bdf)
		return 0;

	*bdf = amd_iommu_alias_table[_bdf];

	*iommu = amd_iommu_rlookup_table[*bdf];
	if (*iommu == NULL)
		return 0;
	*domain = domain_for_device(*bdf);
	if (*domain == NULL) {
		dma_dom = find_protection_domain(*bdf);
		if (!dma_dom)
			dma_dom = (*iommu)->default_dom;
		*domain = &dma_dom->domain;
		set_device_domain(*iommu, *domain, *bdf);
		printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
		       "device ", (*domain)->id);
		print_devid(_bdf, 1);
	}

	return 1;
}

/*
 * This is the generic map function. It maps one 4kb page at paddr to
 * the given address in the DMA address space for the domain.
 */
static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
				     struct dma_ops_domain *dom,
				     unsigned long address,
				     phys_addr_t paddr,
				     int direction)
{
	u64 *pte, __pte;

	WARN_ON(address > dom->aperture_size);

	paddr &= PAGE_MASK;

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);

	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

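	/* translate the DMA direction into IOMMU read/write permissions */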
	if (direction == DMA_TO_DEVICE)
		__pte |= IOMMU_PTE_IR;
	else if (direction == DMA_FROM_DEVICE)
		__pte |= IOMMU_PTE_IW;
	else if (direction == DMA_BIDIRECTIONAL)
		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

	WARN_ON(*pte);

	*pte = __pte;

	return (dma_addr_t)address;
}

/*
 * The generic unmapping function for one page in the DMA address space.
 */
static void dma_ops_domain_unmap(struct amd_iommu *iommu,
				 struct dma_ops_domain *dom,
				 unsigned long address)
{
	u64 *pte;

	if (address >= dom->aperture_size)
		return;

	WARN_ON(address & 0xfffULL || address > dom->aperture_size);

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);

	WARN_ON(!*pte);

	*pte = 0ULL;
}

/*
 * This function contains common code for mapping of a physically
 * contiguous memory region into DMA address space. It is used by all
 * mapping functions provided by this IOMMU driver.
 * Must be called with the domain lock held.
 */
static dma_addr_t __map_single(struct device *dev,
			       struct amd_iommu *iommu,
			       struct dma_ops_domain *dma_dom,
			       phys_addr_t paddr,
			       size_t size,
			       int dir,
			       bool align,
			       u64 dma_mask)
{
	dma_addr_t offset = paddr & ~PAGE_MASK;
	dma_addr_t address, start;
	unsigned int pages;
	unsigned long align_mask = 0;
	int i;

	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
	paddr &= PAGE_MASK;

	if (align)
		align_mask = (1UL << get_order(size)) - 1;

	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
					  dma_mask);
	if (unlikely(address == bad_dma_address))
		goto out;

	start = address;
	for (i = 0; i < pages; ++i) {
		dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
		paddr += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	address += offset;

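	/*
	 * Flush the whole domain TLB if addresses were reused; otherwise
	 * flush only the new range if the IOMMU caches non-present entries
	 */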
	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
		iommu_flush_tlb(iommu, dma_dom->domain.id);
		dma_dom->need_flush = false;
	} else if (unlikely(iommu_has_npcache(iommu)))
		iommu_flush_pages(iommu, dma_dom->domain.id, address, size);

out:
	return address;
}

/*
 * Does the reverse of the __map_single function. Must be called with
 * the domain lock held too
 */
static void __unmap_single(struct amd_iommu *iommu,
			   struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir)
{
	dma_addr_t i, start;
	unsigned int pages;

	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
		return;

	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr &= PAGE_MASK;
	start = dma_addr;

	for (i = 0; i < pages; ++i) {
		dma_ops_domain_unmap(iommu, dma_dom, start);
		start += PAGE_SIZE;
	}

	dma_ops_free_addresses(dma_dom, dma_addr, pages);

	if (amd_iommu_unmap_flush)
		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
}

/*
 * The exported map_single function for dma_ops.
 */
static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
			     size_t size, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	dma_addr_t addr;
	u64 dma_mask;

	if (!check_device(dev))
		return bad_dma_address;

	dma_mask = *dev->dma_mask;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (iommu == NULL || domain == NULL)
		/* device not handled by any AMD IOMMU */
		return (dma_addr_t)paddr;

	spin_lock_irqsave(&domain->lock, flags);
	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
			    dma_mask);
	if (addr == bad_dma_address)
		goto out;

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return addr;
}

/*
 * The exported unmap_single function for dma_ops.
 */
static void unmap_single(struct device *dev, dma_addr_t dma_addr,
			 size_t size, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	if (!check_device(dev) ||
	    !get_device_resources(dev, &iommu, &domain, &devid))
		/* device not handled by any AMD IOMMU */
		return;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, dir);

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * This is a special map_sg function which is used if we should map a
 * device which is not handled by an AMD IOMMU in the system.
 */
static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
			   int nelems, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nelems, i) {
		s->dma_address = (dma_addr_t)sg_phys(s);
		s->dma_length  = s->length;
	}

	return nelems;
}

/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	int i;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;
	u64 dma_mask;

	if (!check_device(dev))
		return 0;

	dma_mask = *dev->dma_mask;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		return map_sg_no_iommu(dev, sglist, nelems, dir);

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, iommu, domain->priv,
					      paddr, s->length, dir, false,
					      dma_mask);

		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
	}

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;
|  | 1132 | unmap: | 
|  | 1133 | for_each_sg(sglist, s, mapped_elems, i) { | 
|  | 1134 | if (s->dma_address) | 
|  | 1135 | __unmap_single(iommu, domain->priv, s->dma_address, | 
|  | 1136 | s->dma_length, dir); | 
|  | 1137 | s->dma_address = s->dma_length = 0; | 
|  | 1138 | } | 
|  | 1139 |  | 
|  | 1140 | mapped_elems = 0; | 
|  | 1141 |  | 
|  | 1142 | goto out; | 
|  | 1143 | } | 
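|  |  |  | 
|  |  | /* | 
|  |  | * Example (a sketch): the usual driver pattern that ends up in map_sg(): | 
|  |  | * | 
|  |  | *	int count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE); | 
|  |  | *	if (count == 0) | 
|  |  | *		goto error; | 
|  |  | * | 
|  |  | * A return value of 0 means the mapping failed; the unmap path above has | 
|  |  | * already rolled back every element that was mapped before the failure. | 
|  |  | */ | 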
|  | 1144 |  | 
| Joerg Roedel | 431b2a2 | 2008-07-11 17:14:22 +0200 | [diff] [blame] | 1145 | /* | 
|  | 1146 | * The exported unmap_sg function for dma_ops (handles scatter-gather | 
|  | 1147 | * lists). | 
|  | 1148 | */ | 
| Joerg Roedel | 65b050a | 2008-06-26 21:28:02 +0200 | [diff] [blame] | 1149 | static void unmap_sg(struct device *dev, struct scatterlist *sglist, | 
|  | 1150 | int nelems, int dir) | 
|  | 1151 | { | 
|  | 1152 | unsigned long flags; | 
|  | 1153 | struct amd_iommu *iommu; | 
|  | 1154 | struct protection_domain *domain; | 
|  | 1155 | struct scatterlist *s; | 
|  | 1156 | u16 devid; | 
|  | 1157 | int i; | 
|  | 1158 |  | 
| Joerg Roedel | dbcc112 | 2008-09-04 15:04:26 +0200 | [diff] [blame] | 1159 | if (!check_device(dev) || | 
|  | 1160 | !get_device_resources(dev, &iommu, &domain, &devid)) | 
| Joerg Roedel | 65b050a | 2008-06-26 21:28:02 +0200 | [diff] [blame] | 1161 | return; | 
|  | 1162 |  | 
|  | 1163 | spin_lock_irqsave(&domain->lock, flags); | 
|  | 1164 |  | 
|  | 1165 | for_each_sg(sglist, s, nelems, i) { | 
|  | 1166 | __unmap_single(iommu, domain->priv, s->dma_address, | 
|  | 1167 | s->dma_length, dir); | 
| Joerg Roedel | 65b050a | 2008-06-26 21:28:02 +0200 | [diff] [blame] | 1168 | s->dma_address = s->dma_length = 0; | 
|  | 1169 | } | 
|  | 1170 |  | 
| Joerg Roedel | 5507eef | 2008-09-04 19:01:02 +0200 | [diff] [blame] | 1171 | if (unlikely(iommu->need_sync)) | 
| Joerg Roedel | 65b050a | 2008-06-26 21:28:02 +0200 | [diff] [blame] | 1172 | iommu_completion_wait(iommu); | 
|  | 1173 |  | 
|  | 1174 | spin_unlock_irqrestore(&domain->lock, flags); | 
|  | 1175 | } | 
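|  |  |  | 
|  |  | /* | 
|  |  | * Note that, as the DMA API requires, callers pass the original nelems | 
|  |  | * given to dma_map_sg() here, not the possibly smaller count it returned. | 
|  |  | */ | 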
|  | 1176 |  | 
| Joerg Roedel | 431b2a2 | 2008-07-11 17:14:22 +0200 | [diff] [blame] | 1177 | /* | 
|  | 1178 | * The exported alloc_coherent function for dma_ops. | 
|  | 1179 | */ | 
| Joerg Roedel | 5d8b53c | 2008-06-26 21:28:03 +0200 | [diff] [blame] | 1180 | static void *alloc_coherent(struct device *dev, size_t size, | 
|  | 1181 | dma_addr_t *dma_addr, gfp_t flag) | 
|  | 1182 | { | 
|  | 1183 | unsigned long flags; | 
|  | 1184 | void *virt_addr; | 
|  | 1185 | struct amd_iommu *iommu; | 
|  | 1186 | struct protection_domain *domain; | 
|  | 1187 | u16 devid; | 
|  | 1188 | phys_addr_t paddr; | 
| Joerg Roedel | 832a90c | 2008-09-18 15:54:23 +0200 | [diff] [blame] | 1189 | u64 dma_mask = dev->coherent_dma_mask; | 
| Joerg Roedel | 5d8b53c | 2008-06-26 21:28:03 +0200 | [diff] [blame] | 1190 |  | 
| Joerg Roedel | dbcc112 | 2008-09-04 15:04:26 +0200 | [diff] [blame] | 1191 | if (!check_device(dev)) | 
|  | 1192 | return NULL; | 
|  | 1193 |  | 
| FUJITA Tomonori | 13d9fea | 2008-09-10 20:19:40 +0900 | [diff] [blame] | 1194 | if (!get_device_resources(dev, &iommu, &domain, &devid)) | 
|  | 1195 | flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); | 
|  | 1196 |  | 
| Joerg Roedel | c97ac53 | 2008-09-11 10:59:15 +0200 | [diff] [blame] | 1197 | flag |= __GFP_ZERO; | 
| Joerg Roedel | 5d8b53c | 2008-06-26 21:28:03 +0200 | [diff] [blame] | 1198 | virt_addr = (void *)__get_free_pages(flag, get_order(size)); | 
|  | 1199 | if (!virt_addr) | 
|  | 1200 | return NULL; | 
|  | 1201 |  | 
| Joerg Roedel | 5d8b53c | 2008-06-26 21:28:03 +0200 | [diff] [blame] | 1202 | paddr = virt_to_phys(virt_addr); | 
|  | 1203 |  | 
| Joerg Roedel | 5d8b53c | 2008-06-26 21:28:03 +0200 | [diff] [blame] | 1204 | if (!iommu || !domain) { | 
|  | 1205 | *dma_addr = (dma_addr_t)paddr; | 
|  | 1206 | return virt_addr; | 
|  | 1207 | } | 
|  | 1208 |  | 
| Joerg Roedel | 832a90c | 2008-09-18 15:54:23 +0200 | [diff] [blame] | 1209 | if (!dma_mask) | 
|  | 1210 | dma_mask = *dev->dma_mask; | 
|  | 1211 |  | 
| Joerg Roedel | 5d8b53c | 2008-06-26 21:28:03 +0200 | [diff] [blame] | 1212 | spin_lock_irqsave(&domain->lock, flags); | 
|  | 1213 |  | 
|  | 1214 | *dma_addr = __map_single(dev, iommu, domain->priv, paddr, | 
| Joerg Roedel | 832a90c | 2008-09-18 15:54:23 +0200 | [diff] [blame] | 1215 | size, DMA_BIDIRECTIONAL, true, dma_mask); | 
| Joerg Roedel | 5d8b53c | 2008-06-26 21:28:03 +0200 | [diff] [blame] | 1216 |  | 
|  | 1217 | if (*dma_addr == bad_dma_address) { | 
|  | 1218 | free_pages((unsigned long)virt_addr, get_order(size)); | 
|  | 1219 | virt_addr = NULL; | 
|  | 1220 | goto out; | 
|  | 1221 | } | 
|  | 1222 |  | 
| Joerg Roedel | 5507eef | 2008-09-04 19:01:02 +0200 | [diff] [blame] | 1223 | if (unlikely(iommu->need_sync)) | 
| Joerg Roedel | 5d8b53c | 2008-06-26 21:28:03 +0200 | [diff] [blame] | 1224 | iommu_completion_wait(iommu); | 
|  | 1225 |  | 
|  | 1226 | out: | 
|  | 1227 | spin_unlock_irqrestore(&domain->lock, flags); | 
|  | 1228 |  | 
|  | 1229 | return virt_addr; | 
|  | 1230 | } | 
|  | 1231 |  | 
| Joerg Roedel | 431b2a2 | 2008-07-11 17:14:22 +0200 | [diff] [blame] | 1232 | /* | 
|  | 1233 | * The exported free_coherent function for dma_ops. | 
| Joerg Roedel | 431b2a2 | 2008-07-11 17:14:22 +0200 | [diff] [blame] | 1234 | */ | 
| Joerg Roedel | 5d8b53c | 2008-06-26 21:28:03 +0200 | [diff] [blame] | 1235 | static void free_coherent(struct device *dev, size_t size, | 
|  | 1236 | void *virt_addr, dma_addr_t dma_addr) | 
|  | 1237 | { | 
|  | 1238 | unsigned long flags; | 
|  | 1239 | struct amd_iommu *iommu; | 
|  | 1240 | struct protection_domain *domain; | 
|  | 1241 | u16 devid; | 
|  | 1242 |  | 
| Joerg Roedel | dbcc112 | 2008-09-04 15:04:26 +0200 | [diff] [blame] | 1243 | if (!check_device(dev)) | 
|  | 1244 | return; | 
|  | 1245 |  | 
| Joerg Roedel | 5d8b53c | 2008-06-26 21:28:03 +0200 | [diff] [blame] | 1246 | get_device_resources(dev, &iommu, &domain, &devid); | 
|  | 1247 |  | 
|  | 1248 | if (!iommu || !domain) | 
|  | 1249 | goto free_mem; | 
|  | 1250 |  | 
|  | 1251 | spin_lock_irqsave(&domain->lock, flags); | 
|  | 1252 |  | 
|  | 1253 | __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); | 
| Joerg Roedel | 5d8b53c | 2008-06-26 21:28:03 +0200 | [diff] [blame] | 1254 |  | 
| Joerg Roedel | 5507eef | 2008-09-04 19:01:02 +0200 | [diff] [blame] | 1255 | if (unlikely(iommu->need_sync)) | 
| Joerg Roedel | 5d8b53c | 2008-06-26 21:28:03 +0200 | [diff] [blame] | 1256 | iommu_completion_wait(iommu); | 
|  | 1257 |  | 
|  | 1258 | spin_unlock_irqrestore(&domain->lock, flags); | 
|  | 1259 |  | 
|  | 1260 | free_mem: | 
|  | 1261 | free_pages((unsigned long)virt_addr, get_order(size)); | 
|  | 1262 | } | 
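|  |  |  | 
|  |  | /* | 
|  |  | * Example (a sketch): a driver allocating a DMA-coherent buffer, which | 
|  |  | * is served by alloc_coherent()/free_coherent() above: | 
|  |  | * | 
|  |  | *	dma_addr_t handle; | 
|  |  | *	void *cpu_addr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL); | 
|  |  | *	if (!cpu_addr) | 
|  |  | *		return -ENOMEM; | 
|  |  | *	... | 
|  |  | *	dma_free_coherent(dev, size, cpu_addr, handle); | 
|  |  | */ | 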
|  | 1263 |  | 
| Joerg Roedel | c432f3d | 2008-06-26 21:28:04 +0200 | [diff] [blame] | 1264 | /* | 
| Joerg Roedel | b39ba6a | 2008-09-09 18:40:46 +0200 | [diff] [blame] | 1265 | * This function is called by the DMA layer to find out if we can handle a | 
|  | 1266 | * particular device. It is part of the dma_ops. | 
|  | 1267 | */ | 
|  | 1268 | static int amd_iommu_dma_supported(struct device *dev, u64 mask) | 
|  | 1269 | { | 
|  | 1270 | u16 bdf; | 
|  | 1271 | struct pci_dev *pcidev; | 
|  | 1272 |  | 
|  | 1273 | /* No device or no PCI device */ | 
|  | 1274 | if (!dev || dev->bus != &pci_bus_type) | 
|  | 1275 | return 0; | 
|  | 1276 |  | 
|  | 1277 | pcidev = to_pci_dev(dev); | 
|  | 1278 |  | 
|  | 1279 | bdf = calc_devid(pcidev->bus->number, pcidev->devfn); | 
|  | 1280 |  | 
|  | 1281 | /* Out of our scope? */ | 
|  | 1282 | if (bdf > amd_iommu_last_bdf) | 
|  | 1283 | return 0; | 
|  | 1284 |  | 
|  | 1285 | return 1; | 
|  | 1286 | } | 
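|  |  |  | 
|  |  | /* | 
|  |  | * Worked example: for PCI device 01:02.3 the device id is | 
|  |  | * calc_devid(0x01, PCI_DEVFN(2, 3)) = (0x01 << 8) | 0x13 = 0x0113. | 
|  |  | * Any BDF above amd_iommu_last_bdf is not described for any IOMMU in | 
|  |  | * the ACPI tables, so we cannot handle such a device. | 
|  |  | */ | 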
|  | 1287 |  | 
|  | 1288 | /* | 
| Joerg Roedel | 431b2a2 | 2008-07-11 17:14:22 +0200 | [diff] [blame] | 1289 | * The function for pre-allocating protection domains. | 
|  | 1290 | * | 
| Joerg Roedel | c432f3d | 2008-06-26 21:28:04 +0200 | [diff] [blame] | 1291 | * Once the driver core informs the DMA layer whenever a driver grabs a | 
|  | 1292 | * device we won't need to preallocate the protection domains anymore. | 
|  | 1293 | * For now we have to. | 
|  | 1294 | */ | 
|  | 1295 | void prealloc_protection_domains(void) | 
|  | 1296 | { | 
|  | 1297 | struct pci_dev *dev = NULL; | 
|  | 1298 | struct dma_ops_domain *dma_dom; | 
|  | 1299 | struct amd_iommu *iommu; | 
|  | 1300 | int order = amd_iommu_aperture_order; | 
|  | 1301 | u16 devid; | 
|  | 1302 |  | 
|  | 1303 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 
|  | 1304 | devid = calc_devid(dev->bus->number, dev->devfn); | 
| Joerg Roedel | 3a61ec3 | 2008-07-25 13:07:50 +0200 | [diff] [blame] | 1305 | if (devid > amd_iommu_last_bdf) | 
| Joerg Roedel | c432f3d | 2008-06-26 21:28:04 +0200 | [diff] [blame] | 1306 | continue; | 
|  | 1307 | devid = amd_iommu_alias_table[devid]; | 
|  | 1308 | if (domain_for_device(devid)) | 
|  | 1309 | continue; | 
|  | 1310 | iommu = amd_iommu_rlookup_table[devid]; | 
|  | 1311 | if (!iommu) | 
|  | 1312 | continue; | 
|  | 1313 | dma_dom = dma_ops_domain_alloc(iommu, order); | 
|  | 1314 | if (!dma_dom) | 
|  | 1315 | continue; | 
|  | 1316 | init_unity_mappings_for_device(dma_dom, devid); | 
| Joerg Roedel | bd60b73 | 2008-09-11 10:24:48 +0200 | [diff] [blame] | 1317 | dma_dom->target_dev = devid; | 
|  | 1318 |  | 
|  | 1319 | list_add_tail(&dma_dom->list, &iommu_pd_list); | 
| Joerg Roedel | c432f3d | 2008-06-26 21:28:04 +0200 | [diff] [blame] | 1320 | } | 
|  | 1321 | } | 
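|  |  |  | 
|  |  | /* | 
|  |  | * The domains preallocated here stay on iommu_pd_list; when a device | 
|  |  | * later asks for a mapping, the lookup path can take the entry whose | 
|  |  | * target_dev matches the device id off this list. | 
|  |  | */ | 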
|  | 1322 |  | 
| Joerg Roedel | 6631ee9 | 2008-06-26 21:28:05 +0200 | [diff] [blame] | 1323 | static struct dma_mapping_ops amd_iommu_dma_ops = { | 
|  | 1324 | .alloc_coherent = alloc_coherent, | 
|  | 1325 | .free_coherent = free_coherent, | 
|  | 1326 | .map_single = map_single, | 
|  | 1327 | .unmap_single = unmap_single, | 
|  | 1328 | .map_sg = map_sg, | 
|  | 1329 | .unmap_sg = unmap_sg, | 
| Joerg Roedel | b39ba6a | 2008-09-09 18:40:46 +0200 | [diff] [blame] | 1330 | .dma_supported = amd_iommu_dma_supported, | 
| Joerg Roedel | 6631ee9 | 2008-06-26 21:28:05 +0200 | [diff] [blame] | 1331 | }; | 
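|  |  |  | 
|  |  | /* | 
|  |  | * How the dispatch works (a simplified sketch of the generic wrapper in | 
|  |  | * asm-x86/dma-mapping.h, not code from this file): | 
|  |  | * | 
|  |  | *	static inline dma_addr_t | 
|  |  | *	dma_map_single(struct device *hwdev, void *ptr, size_t size, | 
|  |  | *		       int direction) | 
|  |  | *	{ | 
|  |  | *		return dma_ops->map_single(hwdev, virt_to_phys(ptr), | 
|  |  | *					   size, direction); | 
|  |  | *	} | 
|  |  | * | 
|  |  | * Once amd_iommu_init_dma_ops() below installs &amd_iommu_dma_ops, every | 
|  |  | * such call is routed into this driver. | 
|  |  | */ | 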
|  | 1332 |  | 
| Joerg Roedel | 431b2a2 | 2008-07-11 17:14:22 +0200 | [diff] [blame] | 1333 | /* | 
|  | 1334 | * The function which hooks the AMD IOMMU driver into dma_ops. | 
|  | 1335 | */ | 
| Joerg Roedel | 6631ee9 | 2008-06-26 21:28:05 +0200 | [diff] [blame] | 1336 | int __init amd_iommu_init_dma_ops(void) | 
|  | 1337 | { | 
|  | 1338 | struct amd_iommu *iommu; | 
|  | 1339 | int order = amd_iommu_aperture_order; | 
|  | 1340 | int ret; | 
|  | 1341 |  | 
| Joerg Roedel | 431b2a2 | 2008-07-11 17:14:22 +0200 | [diff] [blame] | 1342 | /* | 
|  | 1343 | * First allocate a default protection domain for every IOMMU we | 
|  | 1344 | * found in the system. Devices not assigned to any other | 
|  | 1345 | * protection domain will be assigned to the default one. | 
|  | 1346 | */ | 
| Joerg Roedel | 6631ee9 | 2008-06-26 21:28:05 +0200 | [diff] [blame] | 1347 | list_for_each_entry(iommu, &amd_iommu_list, list) { | 
|  | 1348 | iommu->default_dom = dma_ops_domain_alloc(iommu, order); | 
|  | 1349 | if (iommu->default_dom == NULL) | 
|  | 1350 | return -ENOMEM; | 
|  | 1351 | ret = iommu_init_unity_mappings(iommu); | 
|  | 1352 | if (ret) | 
|  | 1353 | goto free_domains; | 
|  | 1354 | } | 
|  | 1355 |  | 
| Joerg Roedel | 431b2a2 | 2008-07-11 17:14:22 +0200 | [diff] [blame] | 1356 | /* | 
|  | 1357 | * If device isolation is enabled, pre-allocate the protection | 
|  | 1358 | * domains for each device. | 
|  | 1359 | */ | 
| Joerg Roedel | 6631ee9 | 2008-06-26 21:28:05 +0200 | [diff] [blame] | 1360 | if (amd_iommu_isolate) | 
|  | 1361 | prealloc_protection_domains(); | 
|  | 1362 |  | 
|  | 1363 | iommu_detected = 1; | 
|  | 1364 | force_iommu = 1; | 
|  | 1365 | bad_dma_address = 0; | 
| Ingo Molnar | 92af4e2 | 2008-06-27 10:48:16 +0200 | [diff] [blame] | 1366 | #ifdef CONFIG_GART_IOMMU | 
| Joerg Roedel | 6631ee9 | 2008-06-26 21:28:05 +0200 | [diff] [blame] | 1367 | gart_iommu_aperture_disabled = 1; | 
|  | 1368 | gart_iommu_aperture = 0; | 
| Ingo Molnar | 92af4e2 | 2008-06-27 10:48:16 +0200 | [diff] [blame] | 1369 | #endif | 
| Joerg Roedel | 6631ee9 | 2008-06-26 21:28:05 +0200 | [diff] [blame] | 1370 |  | 
| Joerg Roedel | 431b2a2 | 2008-07-11 17:14:22 +0200 | [diff] [blame] | 1371 | /* Finally make our dma_ops visible to the drivers */ | 
| Joerg Roedel | 6631ee9 | 2008-06-26 21:28:05 +0200 | [diff] [blame] | 1372 | dma_ops = &amd_iommu_dma_ops; | 
|  | 1373 |  | 
|  | 1374 | return 0; | 
|  | 1375 |  | 
|  | 1376 | free_domains: | 
|  | 1377 |  | 
|  | 1378 | list_for_each_entry(iommu, &amd_iommu_list, list) { | 
|  | 1379 | if (iommu->default_dom) | 
|  | 1380 | dma_ops_domain_free(iommu->default_dom); | 
|  | 1381 | } | 
|  | 1382 |  | 
|  | 1383 | return ret; | 
|  | 1384 | } |
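|  |  |  | 
|  |  | /* | 
|  |  | * amd_iommu_isolate, checked in amd_iommu_init_dma_ops() above, is set | 
|  |  | * via the amd_iommu= kernel command line parameter; with isolation | 
|  |  | * enabled every device gets its own preallocated protection domain | 
|  |  | * instead of sharing its IOMMU's default domain. | 
|  |  | */ | 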