/*
 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/amd_iommu_proto.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define EXIT_LOOP_COUNT 10000000

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt is passed on the kernel command line.
 */
static struct protection_domain *pt_domain;

static struct iommu_ops amd_iommu_ops;

/*
 * General struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e);
static struct dma_ops_domain *find_protection_domain(u16 devid);
static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address, int end_lvl,
		      u64 **pte_page, gfp_t gfp);
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages);
static void reset_iommu_command_buffer(struct amd_iommu *iommu);
static u64 *fetch_pte(struct protection_domain *domain,
		      unsigned long address, int map_size);
static void update_domain(struct protection_domain *domain);

#ifdef CONFIG_AMD_IOMMU_STATS

/*
 * Initialization code for statistics collection
 */

DECLARE_STATS_COUNTER(compl_wait);
DECLARE_STATS_COUNTER(cnt_map_single);
DECLARE_STATS_COUNTER(cnt_unmap_single);
DECLARE_STATS_COUNTER(cnt_map_sg);
DECLARE_STATS_COUNTER(cnt_unmap_sg);
DECLARE_STATS_COUNTER(cnt_alloc_coherent);
DECLARE_STATS_COUNTER(cnt_free_coherent);
DECLARE_STATS_COUNTER(cross_page);
DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);

static struct dentry *stats_dir;
static struct dentry *de_isolate;
static struct dentry *de_fflush;

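/*
 * Create the debugfs file for a single statistics counter; does nothing
 * if the amd-iommu debugfs directory does not exist.
 */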
static void amd_iommu_stats_add(struct __iommu_counter *cnt)
{
	if (stats_dir == NULL)
		return;

	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
				       &cnt->value);
}

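/*
 * Create the amd-iommu debugfs directory and register all statistics
 * counters declared above with it.
 */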
static void amd_iommu_stats_init(void)
{
	stats_dir = debugfs_create_dir("amd-iommu", NULL);
	if (stats_dir == NULL)
		return;

	de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
					 (u32 *)&amd_iommu_isolate);

	de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
					(u32 *)&amd_iommu_unmap_flush);

	amd_iommu_stats_add(&compl_wait);
	amd_iommu_stats_add(&cnt_map_single);
	amd_iommu_stats_add(&cnt_unmap_single);
	amd_iommu_stats_add(&cnt_map_sg);
	amd_iommu_stats_add(&cnt_unmap_sg);
	amd_iommu_stats_add(&cnt_alloc_coherent);
	amd_iommu_stats_add(&cnt_free_coherent);
	amd_iommu_stats_add(&cross_page);
	amd_iommu_stats_add(&domain_flush_single);
	amd_iommu_stats_add(&domain_flush_all);
	amd_iommu_stats_add(&alloced_io_mem);
	amd_iommu_stats_add(&total_map_requests);
}

#endif

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

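/* Dump the raw contents of the device table entry for the given device */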
static void dump_dte_entry(u16 devid)
{
	int i;

	for (i = 0; i < 8; ++i)
		pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
			amd_iommu_dev_table[devid].data[i]);
}

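/* Dump the four data words of the command at the given physical address */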
static void dump_command(unsigned long phys_addr)
{
	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
}

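/* Decode a single event log entry and print it in human-readable form */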
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
	u32 *event = __evt;
	int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	u64 address = (u64)(((u64)event[3]) << 32) | event[2];

	printk(KERN_ERR "AMD-Vi: Event logged [");

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		dump_dte_entry(devid);
		break;
	case EVENT_TYPE_IO_FAULT:
		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
		reset_iommu_command_buffer(iommu);
		dump_command(address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
		       "flags=0x%04x]\n", address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
		       "address=0x%016llx]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	default:
		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
	}
}

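/*
 * Process all pending entries in the event log of this IOMMU and move
 * the event log head pointer past them.
 */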
static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

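/* Interrupt handler - polls the event log of every IOMMU in the system */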
irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_poll_events(iommu);

	return IRQ_HANDLED;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command. Must be called with iommu->lock held.
 */
static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	u32 tail, head;
	u8 *target;

	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	target = iommu->cmd_buf + tail;
	memcpy_toio(target, cmd, sizeof(*cmd));
	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	if (tail == head)
		return -ENOMEM;
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	return 0;
}

/*
 * General queuing function for commands. Takes iommu->lock and calls
 * __iommu_queue_command().
 */
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
	if (!ret)
		iommu->need_sync = true;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

/*
 * This function waits until an IOMMU has completed a completion
 * wait command
 */
static void __iommu_wait_for_completion(struct amd_iommu *iommu)
{
	int ready = 0;
	unsigned status = 0;
	unsigned long i = 0;

	INC_STATS_COUNTER(compl_wait);

	while (!ready && (i < EXIT_LOOP_COUNT)) {
		++i;
		/* wait for the bit to become one */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
	}

	/* set bit back to zero */
	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);

	if (unlikely(i == EXIT_LOOP_COUNT)) {
		spin_unlock(&iommu->lock);
		reset_iommu_command_buffer(iommu);
		spin_lock(&iommu->lock);
	}
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int __iommu_completion_wait(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

	return __iommu_queue_command(iommu, &cmd);
}

/*
 * This function is called whenever we need to ensure that the IOMMU has
 * completed execution of all commands we sent. It sends a
 * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
 * us about that by writing a value to a physical address we pass with
 * the command.
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);

	if (!iommu->need_sync)
		goto out;

	ret = __iommu_completion_wait(iommu);

	iommu->need_sync = false;

	if (ret)
		goto out;

	__iommu_wait_for_completion(iommu);

out:
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

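/*
 * Wait for command completion on every IOMMU which has devices in the
 * given protection domain.
 */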
static void iommu_flush_complete(struct protection_domain *domain)
{
	int i;

	for (i = 0; i < amd_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need to wait for completion of all commands.
		 */
		iommu_completion_wait(amd_iommus[i]);
	}
}

/*
 * Command send function for invalidating a device table entry
 */
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;
	int ret;

	BUG_ON(iommu == NULL);

	memset(&cmd, 0, sizeof(cmd));
	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
	cmd.data[0] = devid;

	ret = iommu_queue_command(iommu, &cmd);

	return ret;
}

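/* Build an INVALIDATE_IOMMU_PAGES command for the given address and domain */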
static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
					  u16 domid, int pde, int s)
{
	memset(cmd, 0, sizeof(*cmd));
	address &= PAGE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
	cmd->data[1] |= domid;
	cmd->data[2] = lower_32_bits(address);
	cmd->data[3] = upper_32_bits(address);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}

/*
 * Generic command send function for invalidating TLB entries
 */
static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
		u64 address, u16 domid, int pde, int s)
{
	struct iommu_cmd cmd;
	int ret;

	__iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s);

	ret = iommu_queue_command(iommu, &cmd);

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static void __iommu_flush_pages(struct protection_domain *domain,
				u64 address, size_t size, int pde)
{
	int s = 0, i;
	unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE);

	address &= PAGE_MASK;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	for (i = 0; i < amd_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need a TLB flush
		 */
		iommu_queue_inv_iommu_pages(amd_iommus[i], address,
					    domain->id, pde, s);
	}
}

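/* Flush the IO/TLB entries for a given address range, not touching the PDEs */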
static void iommu_flush_pages(struct protection_domain *domain,
			      u64 address, size_t size)
{
	__iommu_flush_pages(domain, address, size, 0);
}

/* Flush the whole IO/TLB for a given protection domain */
static void iommu_flush_tlb(struct protection_domain *domain)
{
	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void iommu_flush_tlb_pde(struct protection_domain *domain)
{
	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}

/*
 * This function flushes all domains that have devices on the given IOMMU
 */
static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
{
	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
	struct protection_domain *domain;
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);

	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
		if (domain->dev_iommu[iommu->index] == 0)
			continue;

		spin_lock(&domain->lock);
		iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
		iommu_flush_complete(domain);
		spin_unlock(&domain->lock);
	}

	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

/*
 * This function uses heavy locking and may disable irqs for some time. But
 * this is no issue because it is only called during resume.
 */
void amd_iommu_flush_all_domains(void)
{
	struct protection_domain *domain;
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);

	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
		spin_lock(&domain->lock);
		iommu_flush_tlb_pde(domain);
		iommu_flush_complete(domain);
		spin_unlock(&domain->lock);
	}

	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

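/* Invalidate the device table entry of every device behind the given IOMMU */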
static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
{
	int i;

	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
		if (iommu != amd_iommu_rlookup_table[i])
			continue;

		iommu_queue_inv_dev_entry(iommu, i);
		iommu_completion_wait(iommu);
	}
}

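/*
 * Invalidate the device table entries of all devices attached to the given
 * domain; with domain == NULL all devices attached to any domain are flushed.
 */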
static void flush_devices_by_domain(struct protection_domain *domain)
{
	struct amd_iommu *iommu;
	int i;

	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
		if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
		    (domain != NULL && amd_iommu_pd_table[i] != domain))
			continue;

		iommu = amd_iommu_rlookup_table[i];
		if (!iommu)
			continue;

		iommu_queue_inv_dev_entry(iommu, i);
		iommu_completion_wait(iommu);
	}
}

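/*
 * Reset the command buffer of an IOMMU and re-flush all device table
 * entries and domains, e.g. after an ILLEGAL_COMMAND_ERROR event.
 */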
static void reset_iommu_command_buffer(struct amd_iommu *iommu)
{
	pr_err("AMD-Vi: Resetting IOMMU command buffer\n");

	if (iommu->reset_in_progress)
		panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");

	iommu->reset_in_progress = true;

	amd_iommu_reset_cmd_buffer(iommu);
	flush_all_devices_for_iommu(iommu);
	flush_all_domains_on_iommu(iommu);

	iommu->reset_in_progress = false;
}

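/* Flush the device table entries of all devices in the system */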
void amd_iommu_flush_all_devices(void)
{
	flush_devices_by_domain(NULL);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map_page(struct protection_domain *dom,
			  unsigned long bus_addr,
			  unsigned long phys_addr,
			  int prot,
			  int map_size)
{
	u64 __pte, *pte;

	bus_addr  = PAGE_ALIGN(bus_addr);
	phys_addr = PAGE_ALIGN(phys_addr);

	BUG_ON(!PM_ALIGNED(map_size, bus_addr));
	BUG_ON(!PM_ALIGNED(map_size, phys_addr));

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL);

	if (IOMMU_PTE_PRESENT(*pte))
		return -EBUSY;

	__pte = phys_addr | IOMMU_PTE_P;
	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	*pte = __pte;

	update_domain(dom);

	return 0;
}

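/*
 * Removes the mapping for a specific DMA address by clearing its PTE,
 * if one exists.
 */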
static void iommu_unmap_page(struct protection_domain *dom,
			     unsigned long bus_addr, int map_size)
{
	u64 *pte = fetch_pte(dom, bus_addr, map_size);

	if (pte)
		*pte = 0;
}

/*
 * This function checks if a specific unity mapping entry is needed for
 * this specific IOMMU.
 */
static int iommu_for_unity_map(struct amd_iommu *iommu,
			       struct unity_map_entry *entry)
{
	u16 bdf, i;

	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
		bdf = amd_iommu_alias_table[i];
		if (amd_iommu_rlookup_table[bdf] == iommu)
			return 1;
	}

	return 0;
}

/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default DMA domain of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
	struct unity_map_entry *entry;
	int ret;

	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		if (!iommu_for_unity_map(iommu, entry))
			continue;
		ret = dma_ops_unity_map(iommu->default_dom, entry);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * This function actually applies the mapping to the page table of the
 * dma_ops domain.
 */
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e)
{
	u64 addr;
	int ret;

	for (addr = e->address_start; addr < e->address_end;
	     addr += PAGE_SIZE) {
		ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
				     PM_MAP_4k);
		if (ret)
			return ret;
		/*
		 * if unity mapping is in aperture range mark the page
		 * as allocated in the aperture
		 */
		if (addr < dma_dom->aperture_size)
			__set_bit(addr >> PAGE_SHIFT,
				  dma_dom->aperture[0]->bitmap);
	}

	return 0;
}

/*
 * Inits the unity mappings required for a specific device
 */
static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
					  u16 devid)
{
	struct unity_map_entry *e;
	int ret;

	list_for_each_entry(e, &amd_iommu_unity_map, list) {
		if (!(devid >= e->devid_start && devid <= e->devid_end))
			continue;
		ret = dma_ops_unity_map(dma_dom, e);
		if (ret)
			return ret;
	}

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/

/*
 * The address allocator core functions.
 *
 * called with domain->lock held
 */

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct protection_domain *domain,
		      unsigned long address, int map_size)
{
	int level;
	u64 *pte;

	level = domain->mode - 1;
	pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];

	while (level > map_size) {
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(*pte);
		pte = &pte[PM_LEVEL_INDEX(level, address)];

		if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
			pte = NULL;
			break;
		}
	}

	return pte;
}

/*
 * This function is used to add a new aperture range to an existing
 * aperture in case of dma_ops domain allocation or address allocation
 * failure.
 */
static int alloc_new_range(struct dma_ops_domain *dma_dom,
			   bool populate, gfp_t gfp)
{
	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
	struct amd_iommu *iommu;
	int i;

#ifdef CONFIG_IOMMU_STRESS
	populate = false;
#endif

	if (index >= APERTURE_MAX_RANGES)
		return -ENOMEM;

	dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
	if (!dma_dom->aperture[index])
		return -ENOMEM;

	dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
	if (!dma_dom->aperture[index]->bitmap)
		goto out_free;

	dma_dom->aperture[index]->offset = dma_dom->aperture_size;

	if (populate) {
		unsigned long address = dma_dom->aperture_size;
		int i, num_ptes = APERTURE_RANGE_PAGES / 512;
		u64 *pte, *pte_page;

		for (i = 0; i < num_ptes; ++i) {
			pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k,
					&pte_page, gfp);
			if (!pte)
				goto out_free;

			dma_dom->aperture[index]->pte_pages[i] = pte_page;

			address += APERTURE_RANGE_SIZE / 64;
		}
	}

	dma_dom->aperture_size += APERTURE_RANGE_SIZE;

	/* Initialize the exclusion range if necessary */
	for_each_iommu(iommu) {
		if (iommu->exclusion_start &&
		    iommu->exclusion_start >= dma_dom->aperture[index]->offset
		    && iommu->exclusion_start < dma_dom->aperture_size) {
			unsigned long startpage;
			int pages = iommu_num_pages(iommu->exclusion_start,
						    iommu->exclusion_length,
						    PAGE_SIZE);
			startpage = iommu->exclusion_start >> PAGE_SHIFT;
			dma_ops_reserve_addresses(dma_dom, startpage, pages);
		}
	}

	/*
	 * Check for areas already mapped as present in the new aperture
	 * range and mark those pages as reserved in the allocator. Such
	 * mappings may already exist as a result of requested unity
	 * mappings for devices.
	 */
	for (i = dma_dom->aperture[index]->offset;
	     i < dma_dom->aperture_size;
	     i += PAGE_SIZE) {
		u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k);
		if (!pte || !IOMMU_PTE_PRESENT(*pte))
			continue;

		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
	}

	update_domain(&dma_dom->domain);

	return 0;

out_free:
	update_domain(&dma_dom->domain);

	free_page((unsigned long)dma_dom->aperture[index]->bitmap);

	kfree(dma_dom->aperture[index]);
	dma_dom->aperture[index] = NULL;

	return -ENOMEM;
}

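/*
 * Scan the aperture ranges, starting at 'start', for a free range of
 * the requested number of pages. Returns the allocated DMA address or
 * -1 if no suitable range was found.
 */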
static unsigned long dma_ops_area_alloc(struct device *dev,
					struct dma_ops_domain *dom,
					unsigned int pages,
					unsigned long align_mask,
					u64 dma_mask,
					unsigned long start)
{
	unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
	int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
	int i = start >> APERTURE_RANGE_SHIFT;
	unsigned long boundary_size;
	unsigned long address = -1;
	unsigned long limit;

	next_bit >>= PAGE_SHIFT;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			PAGE_SIZE) >> PAGE_SHIFT;

	for (; i < max_index; ++i) {
		unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;

		if (dom->aperture[i]->offset >= dma_mask)
			break;

		limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
					       dma_mask >> PAGE_SHIFT);

		address = iommu_area_alloc(dom->aperture[i]->bitmap,
					   limit, next_bit, pages, 0,
					   boundary_size, align_mask);
		if (address != -1) {
			address = dom->aperture[i]->offset +
				  (address << PAGE_SHIFT);
			dom->next_address = address + (pages << PAGE_SHIFT);
			break;
		}

		next_bit = 0;
	}

	return address;
}

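/*
 * Allocate a DMA address for the given number of pages. If the search from
 * dom->next_address fails, it wraps around to the aperture start and marks
 * the domain for a TLB flush.
 */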
static unsigned long dma_ops_alloc_addresses(struct device *dev,
					     struct dma_ops_domain *dom,
					     unsigned int pages,
					     unsigned long align_mask,
					     u64 dma_mask)
{
	unsigned long address;

#ifdef CONFIG_IOMMU_STRESS
	dom->next_address = 0;
	dom->need_flush = true;
#endif

	address = dma_ops_area_alloc(dev, dom, pages, align_mask,
				     dma_mask, dom->next_address);

	if (address == -1) {
		dom->next_address = 0;
		address = dma_ops_area_alloc(dev, dom, pages, align_mask,
					     dma_mask, 0);
		dom->need_flush = true;
	}

	if (unlikely(address == -1))
		address = DMA_ERROR_CODE;

	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

	return address;
}

/*
 * The address free function.
 *
 * called with domain->lock held
 */
static void dma_ops_free_addresses(struct dma_ops_domain *dom,
				   unsigned long address,
				   unsigned int pages)
{
	unsigned i = address >> APERTURE_RANGE_SHIFT;
	struct aperture_range *range = dom->aperture[i];

	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);

#ifdef CONFIG_IOMMU_STRESS
	if (i < 4)
		return;
#endif

	if (address >= dom->next_address)
		dom->need_flush = true;

	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;

	iommu_area_free(range->bitmap, address, pages);
}

/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/

/*
 * This function adds a protection domain to the global protection domain list
 */
static void add_domain_to_list(struct protection_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
	list_add(&domain->list, &amd_iommu_pd_list);
	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

/*
 * This function removes a protection domain from the global
 * protection domain list
 */
static void del_domain_from_list(struct protection_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
	list_del(&domain->list);
	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}

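/* Allocate a new protection domain id from the global bitmap */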
static u16 domain_id_alloc(void)
{
	unsigned long flags;
	int id;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
	BUG_ON(id == 0);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__set_bit(id, amd_iommu_pd_alloc_bitmap);
	else
		id = 0;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return id;
}

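/* Release a protection domain id back to the global bitmap */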
static void domain_id_free(int id)
{
	unsigned long flags;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}

/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages)
{
	unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;

	if (start_page + pages > last_page)
		pages = last_page - start_page;

	for (i = start_page; i < start_page + pages; ++i) {
		int index = i / APERTURE_RANGE_PAGES;
		int page  = i % APERTURE_RANGE_PAGES;
		__set_bit(page, dom->aperture[index]->bitmap);
	}
}

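/*
 * Free all pages of the three-level page table of a protection domain
 * and reset its pt_root pointer.
 */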
static void free_pagetable(struct protection_domain *domain)
{
	int i, j;
	u64 *p1, *p2, *p3;

	p1 = domain->pt_root;

	if (!p1)
		return;

	for (i = 0; i < 512; ++i) {
		if (!IOMMU_PTE_PRESENT(p1[i]))
			continue;

		p2 = IOMMU_PTE_PAGE(p1[i]);
		for (j = 0; j < 512; ++j) {
			if (!IOMMU_PTE_PRESENT(p2[j]))
				continue;
			p3 = IOMMU_PTE_PAGE(p2[j]);
			free_page((unsigned long)p3);
		}

		free_page((unsigned long)p2);
	}

	free_page((unsigned long)p1);

	domain->pt_root = NULL;
}

/*
 * Free a domain, only used if something went wrong in the
 * allocation path and we need to free an already allocated page table
 */
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
	int i;

	if (!dom)
		return;

	del_domain_from_list(&dom->domain);

	free_pagetable(&dom->domain);

	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
		if (!dom->aperture[i])
			continue;
		free_page((unsigned long)dom->aperture[i]->bitmap);
		kfree(dom->aperture[i]);
	}

	kfree(dom);
}

/*
 * Allocates a new protection domain usable for the dma_ops functions.
 * It also initializes the page table and the address allocator data
 * structures required for the dma_ops interface
 */
static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
{
	struct dma_ops_domain *dma_dom;

	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
	if (!dma_dom)
		return NULL;

	spin_lock_init(&dma_dom->domain.lock);

	dma_dom->domain.id = domain_id_alloc();
	if (dma_dom->domain.id == 0)
		goto free_dma_dom;
	dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	dma_dom->domain.flags = PD_DMA_OPS_MASK;
	dma_dom->domain.priv = dma_dom;
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;

	dma_dom->need_flush = false;
	dma_dom->target_dev = 0xffff;

	add_domain_to_list(&dma_dom->domain);

	if (alloc_new_range(dma_dom, true, GFP_KERNEL))
		goto free_dma_dom;

	/*
	 * Mark the first page as allocated so we never return 0 as
	 * a valid dma-address; this way we can use 0 as an error value.
	 */
	dma_dom->aperture[0]->bitmap[0] = 1;
	dma_dom->next_address = 0;

	return dma_dom;

free_dma_dom:
	dma_ops_domain_free(dma_dom);

	return NULL;
}

/*
 * little helper function to check whether a given protection domain is a
 * dma_ops domain
 */
static bool dma_ops_domain(struct protection_domain *domain)
{
	return domain->flags & PD_DMA_OPS_MASK;
}

/*
 * Find out the protection domain structure for a given PCI device. This
 * will give us the pointer to the page table root for example.
 */
static struct protection_domain *domain_for_device(u16 devid)
{
	struct protection_domain *dom;
	unsigned long flags;

	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
	dom = amd_iommu_pd_table[devid];
	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return dom;
}

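/*
 * Write the page table root and domain id of the given protection domain
 * into the device table entry for devid and update the lookup table.
 */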
static void set_dte_entry(u16 devid, struct protection_domain *domain)
{
	u64 pte_root = virt_to_phys(domain->pt_root);

	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
		    << DEV_ENTRY_MODE_SHIFT;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

	amd_iommu_dev_table[devid].data[2] = domain->id;
	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);

	amd_iommu_pd_table[devid] = domain;
}

/*
 * If a device is not yet associated with a domain, this function
 * assigns it to the given domain and makes it visible to the hardware
 * (unlocked version).
 */
static void __attach_device(struct amd_iommu *iommu,
			    struct protection_domain *domain,
			    u16 devid)
{
	/* lock domain */
	spin_lock(&domain->lock);

	/* update DTE entry */
	set_dte_entry(devid, domain);

	/* Do reference counting */
	domain->dev_iommu[iommu->index] += 1;
	domain->dev_cnt += 1;

	/* ready */
	spin_unlock(&domain->lock);
}

/*
 * If a device is not yet associated with a domain, this function
 * assigns it to the given domain and makes it visible to the hardware
 * (takes the device table lock).
 */
static void attach_device(struct amd_iommu *iommu,
			  struct protection_domain *domain,
			  u16 devid)
{
	unsigned long flags;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	__attach_device(iommu, domain, devid);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	/*
	 * We might boot into a crash-kernel here. The crashed kernel
	 * left the caches in the IOMMU dirty. So we have to flush
	 * here to evict all dirty stuff.
	 */
	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_flush_tlb_pde(domain);
}

/*
 * Removes a device from a protection domain (unlocked)
 */
static void __detach_device(struct protection_domain *domain, u16 devid)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	BUG_ON(!iommu);

	/* lock domain */
	spin_lock(&domain->lock);

	/* remove domain from the lookup table */
	amd_iommu_pd_table[devid] = NULL;

	/* remove entry from the device table seen by the hardware */
	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
	amd_iommu_dev_table[devid].data[1] = 0;
	amd_iommu_dev_table[devid].data[2] = 0;

	amd_iommu_apply_erratum_63(devid);

	/* decrease reference counters */
	domain->dev_iommu[iommu->index] -= 1;
	domain->dev_cnt -= 1;

	/* ready */
	spin_unlock(&domain->lock);

	/*
	 * If we run in passthrough mode the device must be assigned to the
	 * passthrough domain if it is detached from any other domain
	 */
	if (iommu_pass_through)
		__attach_device(iommu, pt_domain, devid);
}

/*
 * Removes a device from a protection domain (with devtable_lock held)
 */
static void detach_device(struct protection_domain *domain, u16 devid)
{
	unsigned long flags;

	/* lock device table */
	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	__detach_device(domain, devid);
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}

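/*
 * Bus notifier callback - allocates a dma_ops protection domain for newly
 * added devices and detaches devices whose driver was unbound.
 */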
static int device_change_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 devid = calc_devid(pdev->bus->number, pdev->devfn);
	struct protection_domain *domain;
	struct dma_ops_domain *dma_domain;
	struct amd_iommu *iommu;
	unsigned long flags;

	if (devid > amd_iommu_last_bdf)
		goto out;

	devid = amd_iommu_alias_table[devid];

	iommu = amd_iommu_rlookup_table[devid];
	if (iommu == NULL)
		goto out;

	domain = domain_for_device(devid);

	if (domain && !dma_ops_domain(domain))
		WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound "
			  "to a non-dma-ops domain\n", dev_name(dev));

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		if (!domain)
			goto out;
		if (iommu_pass_through)
			break;
		detach_device(domain, devid);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		/* allocate a protection domain if a device is added */
		dma_domain = find_protection_domain(devid);
		if (dma_domain)
			goto out;
		dma_domain = dma_ops_domain_alloc(iommu);
		if (!dma_domain)
			goto out;
		dma_domain->target_dev = devid;

		spin_lock_irqsave(&iommu_pd_list_lock, flags);
		list_add_tail(&dma_domain->list, &iommu_pd_list);
		spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

		break;
	default:
		goto out;
	}

	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_completion_wait(iommu);

out:
	return 0;
}

static struct notifier_block device_nb = {
	.notifier_call = device_change_notifier,
};

Joerg Roedel431b2a22008-07-11 17:14:22 +02001369/*****************************************************************************
1370 *
1371 * The next functions belong to the dma_ops mapping/unmapping code.
1372 *
1373 *****************************************************************************/
1374
1375/*
Joerg Roedeldbcc1122008-09-04 15:04:26 +02001376 * This function checks if the driver got a valid device from the caller to
1377 * avoid dereferencing invalid pointers.
1378 */
1379static bool check_device(struct device *dev)
1380{
Joerg Roedel420aef82009-11-23 16:14:57 +01001381 u16 bdf;
1382 struct pci_dev *pcidev;
1383
Joerg Roedeldbcc1122008-09-04 15:04:26 +02001384 if (!dev || !dev->dma_mask)
1385 return false;
1386
Joerg Roedel420aef82009-11-23 16:14:57 +01001387 /* No device or no PCI device */
1388 if (!dev || dev->bus != &pci_bus_type)
1389 return false;
1390
1391 pcidev = to_pci_dev(dev);
1392
1393 bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
1394
1395 /* Out of our scope? */
1396 if (bdf > amd_iommu_last_bdf)
1397 return false;
1398
1399 if (amd_iommu_rlookup_table[bdf] == NULL)
1400 return false;
1401
Joerg Roedeldbcc1122008-09-04 15:04:26 +02001402 return true;
1403}
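
/*
 * A worked example for the device id handling above, assuming
 * calc_devid() packs the BDF as (bus << 8) | devfn as elsewhere in this
 * driver: PCI device 00:01.0 has bus 0 and devfn PCI_DEVFN(1, 0) == 0x08,
 * so its 16-bit device id is 0x0008. That id is what indexes
 * amd_iommu_rlookup_table[] and is compared against amd_iommu_last_bdf.
 */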

/*
 * Traverses the list of preallocated protection domains to find the
 * domain for a specific device
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
	struct dma_ops_domain *entry, *ret = NULL;
	unsigned long flags;

	if (list_empty(&iommu_pd_list))
		return NULL;

	spin_lock_irqsave(&iommu_pd_list_lock, flags);

	list_for_each_entry(entry, &iommu_pd_list, list) {
		if (entry->target_dev == devid) {
			ret = entry;
			break;
		}
	}

	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

	return ret;
}

/*
 * In the dma_ops path we only have the struct device. This function
 * finds the corresponding IOMMU, the protection domain and the
 * requestor id for a given device.
 * If the device is not yet associated with a domain this is also done
 * in this function.
 */
static bool get_device_resources(struct device *dev,
				 struct amd_iommu **iommu,
				 struct protection_domain **domain,
				 u16 *bdf)
{
	struct dma_ops_domain *dma_dom;
	struct pci_dev *pcidev;
	u16 _bdf;

	if (!check_device(dev))
		return false;

	pcidev = to_pci_dev(dev);
	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
	*bdf = amd_iommu_alias_table[_bdf];
	*iommu = amd_iommu_rlookup_table[*bdf];
	*domain = domain_for_device(*bdf);

	if (*domain == NULL) {
		dma_dom = find_protection_domain(*bdf);
		if (!dma_dom)
			dma_dom = (*iommu)->default_dom;
		*domain = &dma_dom->domain;
		attach_device(*iommu, *domain, *bdf);
		DUMP_printk("Using protection domain %d for device %s\n",
			    (*domain)->id, dev_name(dev));
	}

	if (domain_for_device(_bdf) == NULL)
		attach_device(*iommu, *domain, _bdf);

	return true;
}
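
/*
 * A note on the two attach_device() calls above: requests from a device
 * may show up on the bus under a different requestor id, recorded in
 * amd_iommu_alias_table[]. The domain is therefore looked up and
 * attached via the alias (*bdf), and the final check makes sure the
 * original requestor id (_bdf) is bound to the same domain as well.
 */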

static void update_device_table(struct protection_domain *domain)
{
	unsigned long flags;
	int i;

	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
		if (amd_iommu_pd_table[i] != domain)
			continue;
		write_lock_irqsave(&amd_iommu_devtable_lock, flags);
		set_dte_entry(i, domain);
		write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
	}
}

static void update_domain(struct protection_domain *domain)
{
	if (!domain->updated)
		return;

	update_device_table(domain);
	flush_devices_by_domain(domain);
	iommu_flush_tlb_pde(domain);

	domain->updated = false;
}
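
/*
 * The domain->updated flag above implements a lazy update scheme:
 * page-table changes like increase_address_space() only set the flag,
 * and update_domain() later rewrites the affected device table entries
 * and flushes the IOTLB in one go. The dma_ops mapping path calls it
 * after every page-table manipulation (see dma_ops_get_pte() below).
 */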

/*
 * This function is used to add another level to an IO page table. Each
 * additional level widens the address space by 9 bits, up to a maximum
 * of 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   gfp_t gfp)
{
	u64 *pte;

	if (domain->mode == PAGE_MODE_6_LEVEL)
		/* address space already 64 bit large */
		return false;

	pte = (void *)get_zeroed_page(gfp);
	if (!pte)
		return false;

	*pte = PM_LEVEL_PDE(domain->mode,
			    virt_to_phys(domain->pt_root));
	domain->pt_root = pte;
	domain->mode += 1;
	domain->updated = true;

	return true;
}
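
/*
 * A quick sanity check of the 9-bits-per-level math, assuming 4kb pages
 * (12 offset bits) and 512-entry page tables: PAGE_MODE_3_LEVEL resolves
 * 12 + 3 * 9 = 39 address bits, a fourth level extends that to 48 bits,
 * and a sixth level would nominally cover 66 bits, which is why a
 * PAGE_MODE_6_LEVEL address space is already treated as the full 64 bits
 * and never grown further.
 */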

static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      int end_lvl,
		      u64 **pte_page,
		      gfp_t gfp)
{
	u64 *pte, *page;
	int level;

	while (address > PM_LEVEL_SIZE(domain->mode))
		increase_address_space(domain, gfp);

	level = domain->mode - 1;
	pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];

	while (level > end_lvl) {
		if (!IOMMU_PTE_PRESENT(*pte)) {
			page = (u64 *)get_zeroed_page(gfp);
			if (!page)
				return NULL;
			*pte = PM_LEVEL_PDE(level, virt_to_phys(page));
		}

		level -= 1;

		pte = IOMMU_PTE_PAGE(*pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}
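
/*
 * Sketch of the walk performed by alloc_pte() for a domain in
 * PAGE_MODE_3_LEVEL with end_lvl == PM_MAP_4k: the loop starts at level
 * 2 just below the root, allocates a zeroed page for every non-present
 * directory entry on the way down and stops at level 0, returning a
 * pointer to the final PTE slot (and, via pte_page, the page-table page
 * containing it) without writing the mapping itself.
 */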

/*
 * This function fetches the PTE for a given address in the aperture
 */
static u64 *dma_ops_get_pte(struct dma_ops_domain *dom,
			    unsigned long address)
{
	struct aperture_range *aperture;
	u64 *pte, *pte_page;

	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
	if (!aperture)
		return NULL;

	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
	if (!pte) {
		pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page,
				GFP_ATOMIC);
		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
	} else
		pte += PM_LEVEL_INDEX(0, address);

	update_domain(&dom->domain);

	return pte;
}

/*
 * This is the generic map function. It maps one 4kb page at paddr to
 * the given address in the DMA address space for the domain.
 */
static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
				     unsigned long address,
				     phys_addr_t paddr,
				     int direction)
{
	u64 *pte, __pte;

	WARN_ON(address > dom->aperture_size);

	paddr &= PAGE_MASK;

	pte = dma_ops_get_pte(dom, address);
	if (!pte)
		return DMA_ERROR_CODE;

	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

	if (direction == DMA_TO_DEVICE)
		__pte |= IOMMU_PTE_IR;
	else if (direction == DMA_FROM_DEVICE)
		__pte |= IOMMU_PTE_IW;
	else if (direction == DMA_BIDIRECTIONAL)
		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

	WARN_ON(*pte);

	*pte = __pte;

	return (dma_addr_t)address;
}
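
/*
 * The direction-to-permission mapping above follows the device's point
 * of view: DMA_TO_DEVICE means the device reads from memory, so the PTE
 * gets IOMMU_PTE_IR; DMA_FROM_DEVICE means the device writes to memory,
 * so it gets IOMMU_PTE_IW; DMA_BIDIRECTIONAL gets both. Any other
 * direction leaves a present entry with neither permission set.
 */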

/*
 * The generic unmapping function for one page in the DMA address space.
 */
static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
				 unsigned long address)
{
	struct aperture_range *aperture;
	u64 *pte;

	if (address >= dom->aperture_size)
		return;

	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
	if (!aperture)
		return;

	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
	if (!pte)
		return;

	pte += PM_LEVEL_INDEX(0, address);

	WARN_ON(!*pte);

	*pte = 0ULL;
}

/*
 * This function contains common code for mapping of a physically
 * contiguous memory region into DMA address space. It is used by all
 * mapping functions provided with this IOMMU driver.
 * Must be called with the domain lock held.
 */
static dma_addr_t __map_single(struct device *dev,
			       struct dma_ops_domain *dma_dom,
			       phys_addr_t paddr,
			       size_t size,
			       int dir,
			       bool align,
			       u64 dma_mask)
{
	dma_addr_t offset = paddr & ~PAGE_MASK;
	dma_addr_t address, start, ret;
	unsigned int pages;
	unsigned long align_mask = 0;
	int i;

	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
	paddr &= PAGE_MASK;

	INC_STATS_COUNTER(total_map_requests);

	if (pages > 1)
		INC_STATS_COUNTER(cross_page);

	if (align)
		align_mask = (1UL << get_order(size)) - 1;

retry:
	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
					  dma_mask);
	if (unlikely(address == DMA_ERROR_CODE)) {
		/*
		 * setting next_address here will let the address
		 * allocator only scan the newly allocated range in the
		 * first run. This is a small optimization.
		 */
		dma_dom->next_address = dma_dom->aperture_size;

		if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
			goto out;

		/*
		 * aperture was successfully enlarged by 128 MB, try
		 * allocation again
		 */
		goto retry;
	}

	start = address;
	for (i = 0; i < pages; ++i) {
		ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
		if (ret == DMA_ERROR_CODE)
			goto out_unmap;

		paddr += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	address += offset;

	ADD_STATS_COUNTER(alloced_io_mem, size);

	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
		iommu_flush_tlb(&dma_dom->domain);
		dma_dom->need_flush = false;
	} else if (unlikely(amd_iommu_np_cache))
		iommu_flush_pages(&dma_dom->domain, address, size);

out:
	return address;

out_unmap:

	for (--i; i >= 0; --i) {
		start -= PAGE_SIZE;
		dma_ops_domain_unmap(dma_dom, start);
	}

	dma_ops_free_addresses(dma_dom, address, pages);

	return DMA_ERROR_CODE;
}
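
/*
 * A worked example for the page arithmetic in __map_single(): mapping
 * paddr 0x12345678 with size 0x1000 yields offset == 0x678 and, via
 * iommu_num_pages(), pages == 2, because the region 0x12345678-0x12346677
 * touches two 4kb pages. Both pages are mapped and the returned DMA
 * address is the allocated aperture address plus the 0x678 offset.
 */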

/*
 * Does the reverse of the __map_single function. Must be called with
 * the domain lock held too.
 */
static void __unmap_single(struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir)
{
	dma_addr_t i, start;
	unsigned int pages;

	if ((dma_addr == DMA_ERROR_CODE) ||
	    (dma_addr + size > dma_dom->aperture_size))
		return;

	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr &= PAGE_MASK;
	start = dma_addr;

	for (i = 0; i < pages; ++i) {
		dma_ops_domain_unmap(dma_dom, start);
		start += PAGE_SIZE;
	}

	SUB_STATS_COUNTER(alloced_io_mem, size);

	dma_ops_free_addresses(dma_dom, dma_addr, pages);

	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
		iommu_flush_pages(&dma_dom->domain, dma_addr, size);
		dma_dom->need_flush = false;
	}
}

/*
 * The exported map_page function for dma_ops.
 */
static dma_addr_t map_page(struct device *dev, struct page *page,
			   unsigned long offset, size_t size,
			   enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	dma_addr_t addr;
	u64 dma_mask;
	phys_addr_t paddr = page_to_phys(page) + offset;

	INC_STATS_COUNTER(cnt_map_single);

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		/* device not handled by any AMD IOMMU */
		return (dma_addr_t)paddr;

	dma_mask = *dev->dma_mask;

	if (!dma_ops_domain(domain))
		return DMA_ERROR_CODE;

	spin_lock_irqsave(&domain->lock, flags);

	addr = __map_single(dev, domain->priv, paddr, size, dir, false,
			    dma_mask);
	if (addr == DMA_ERROR_CODE)
		goto out;

	iommu_flush_complete(domain);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return addr;
}
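
/*
 * A sketch of how a driver ends up in map_page() above, assuming the
 * usual DMA-API entry points from linux/dma-mapping.h:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, dma))
 *		return -EIO;
 *
 * dma_map_page() resolves dma_ops (set to amd_iommu_dma_ops at the end
 * of this file) and dispatches to the .map_page callback.
 */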

/*
 * The exported unmap_page function for dma_ops.
 */
static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	INC_STATS_COUNTER(cnt_unmap_single);

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		/* device not handled by any AMD IOMMU */
		return;

	if (!dma_ops_domain(domain))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(domain->priv, dma_addr, size, dir);

	iommu_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * This is a special map_sg function which is used if we should map a
 * device which is not handled by an AMD IOMMU in the system.
 */
static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
			   int nelems, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nelems, i) {
		s->dma_address = (dma_addr_t)sg_phys(s);
		s->dma_length = s->length;
	}

	return nelems;
}

/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, enum dma_data_direction dir,
		  struct dma_attrs *attrs)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	int i;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;
	u64 dma_mask;

	INC_STATS_COUNTER(cnt_map_sg);

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		return map_sg_no_iommu(dev, sglist, nelems, dir);

	dma_mask = *dev->dma_mask;

	if (!dma_ops_domain(domain))
		return 0;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, domain->priv,
					      paddr, s->length, dir, false,
					      dma_mask);

		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
	}

	iommu_flush_complete(domain);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;
unmap:
	for_each_sg(sglist, s, mapped_elems, i) {
		if (s->dma_address)
			__unmap_single(domain->priv, s->dma_address,
				       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	mapped_elems = 0;

	goto out;
}
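
/*
 * Note on the error path above: if __map_single() fails for one
 * scatterlist entry, the unmap label walks back over the mapped_elems
 * entries mapped so far, undoes them and returns 0, which the DMA API
 * treats as map_sg failure.
 */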

/*
 * The exported unmap_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		     int nelems, enum dma_data_direction dir,
		     struct dma_attrs *attrs)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	struct scatterlist *s;
	u16 devid;
	int i;

	INC_STATS_COUNTER(cnt_unmap_sg);

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		return;

	if (!dma_ops_domain(domain))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		__unmap_single(domain->priv, s->dma_address,
			       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	iommu_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * The exported alloc_coherent function for dma_ops.
 */
static void *alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long flags;
	void *virt_addr;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	phys_addr_t paddr;
	u64 dma_mask = dev->coherent_dma_mask;

	INC_STATS_COUNTER(cnt_alloc_coherent);

	if (!get_device_resources(dev, &iommu, &domain, &devid)) {
		virt_addr = (void *)__get_free_pages(flag, get_order(size));
		*dma_addr = __pa(virt_addr);
		return virt_addr;
	}

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
	flag |= __GFP_ZERO;

	virt_addr = (void *)__get_free_pages(flag, get_order(size));
	if (!virt_addr)
		return NULL;

	paddr = virt_to_phys(virt_addr);

	if (!dma_ops_domain(domain))
		goto out_free;

	if (!dma_mask)
		dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	*dma_addr = __map_single(dev, domain->priv, paddr,
				 size, DMA_BIDIRECTIONAL, true, dma_mask);

	if (*dma_addr == DMA_ERROR_CODE) {
		spin_unlock_irqrestore(&domain->lock, flags);
		goto out_free;
	}

	iommu_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);

	return virt_addr;

out_free:
	free_pages((unsigned long)virt_addr, get_order(size));

	return NULL;
}
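
/*
 * The coherent allocation path from a driver's point of view, assuming
 * the standard DMA-API wrapper:
 *
 *	void *cpu_addr;
 *	dma_addr_t dma_handle;
 *
 *	cpu_addr = dma_alloc_coherent(&pdev->dev, size, &dma_handle,
 *				      GFP_KERNEL);
 *
 * For a device behind an AMD IOMMU this ends up in alloc_coherent()
 * above, which backs the buffer with ordinary pages and maps them
 * DMA_BIDIRECTIONAL into the device's aperture.
 */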

/*
 * The exported free_coherent function for dma_ops.
 */
static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	INC_STATS_COUNTER(cnt_free_coherent);

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		goto free_mem;

	if (!dma_ops_domain(domain))
		goto free_mem;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

	iommu_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);

free_mem:
	free_pages((unsigned long)virt_addr, get_order(size));
}

/*
 * This function is called by the DMA layer to find out if we can handle a
 * particular device. It is part of the dma_ops.
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
	return check_device(dev);
}

/*
 * The function for pre-allocating protection domains.
 *
 * Once the driver core informs the DMA layer when a driver grabs a
 * device we won't need to preallocate the protection domains anymore.
 * For now we have to.
 */
static void prealloc_protection_domains(void)
{
	struct pci_dev *dev = NULL;
	struct dma_ops_domain *dma_dom;
	struct amd_iommu *iommu;
	u16 devid, __devid;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		__devid = devid = calc_devid(dev->bus->number, dev->devfn);
		if (devid > amd_iommu_last_bdf)
			continue;
		devid = amd_iommu_alias_table[devid];
		if (domain_for_device(devid))
			continue;
		iommu = amd_iommu_rlookup_table[devid];
		if (!iommu)
			continue;
		dma_dom = dma_ops_domain_alloc(iommu);
		if (!dma_dom)
			continue;
		init_unity_mappings_for_device(dma_dom, devid);
		dma_dom->target_dev = devid;

		attach_device(iommu, &dma_dom->domain, devid);
		if (__devid != devid)
			attach_device(iommu, &dma_dom->domain, __devid);

		list_add_tail(&dma_dom->list, &iommu_pd_list);
	}
}

static struct dma_map_ops amd_iommu_dma_ops = {
	.alloc_coherent = alloc_coherent,
	.free_coherent = free_coherent,
	.map_page = map_page,
	.unmap_page = unmap_page,
	.map_sg = map_sg,
	.unmap_sg = unmap_sg,
	.dma_supported = amd_iommu_dma_supported,
};

/*
 * The function which hooks the AMD IOMMU driver into dma_ops.
 */
int __init amd_iommu_init_dma_ops(void)
{
	struct amd_iommu *iommu;
	int ret;

	/*
	 * first allocate a default protection domain for every IOMMU we
	 * found in the system. Devices not assigned to any other
	 * protection domain will be assigned to the default one.
	 */
	for_each_iommu(iommu) {
		iommu->default_dom = dma_ops_domain_alloc(iommu);
		if (iommu->default_dom == NULL)
			return -ENOMEM;
		iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
		ret = iommu_init_unity_mappings(iommu);
		if (ret)
			goto free_domains;
	}

	/*
	 * If device isolation is enabled, pre-allocate the protection
	 * domains for each device.
	 */
	if (amd_iommu_isolate)
		prealloc_protection_domains();

	iommu_detected = 1;
	swiotlb = 0;
#ifdef CONFIG_GART_IOMMU
	gart_iommu_aperture_disabled = 1;
	gart_iommu_aperture = 0;
#endif

	/* Make the driver finally visible to the drivers */
	dma_ops = &amd_iommu_dma_ops;

	register_iommu(&amd_iommu_ops);

	bus_register_notifier(&pci_bus_type, &device_nb);

	amd_iommu_stats_init();

	return 0;

free_domains:

	for_each_iommu(iommu) {
		if (iommu->default_dom)
			dma_ops_domain_free(iommu->default_dom);
	}

	return ret;
}

/*****************************************************************************
 *
 * The following functions belong to the exported interface of AMD IOMMU
 *
 * This interface allows access to lower level functions of the IOMMU
 * like protection domain handling and assignment of devices to domains
 * which is not possible with the dma_ops interface.
 *
 *****************************************************************************/

static void cleanup_domain(struct protection_domain *domain)
{
	unsigned long flags;
	u16 devid;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		if (amd_iommu_pd_table[devid] == domain)
			__detach_device(domain, devid);

	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}

static void protection_domain_free(struct protection_domain *domain)
{
	if (!domain)
		return;

	del_domain_from_list(domain);

	if (domain->id)
		domain_id_free(domain->id);

	kfree(domain);
}

static struct protection_domain *protection_domain_alloc(void)
{
	struct protection_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);
	domain->id = domain_id_alloc();
	if (!domain->id)
		goto out_err;

	add_domain_to_list(domain);

	return domain;

out_err:
	kfree(domain);

	return NULL;
}

static int amd_iommu_domain_init(struct iommu_domain *dom)
{
	struct protection_domain *domain;

	domain = protection_domain_alloc();
	if (!domain)
		goto out_free;

	domain->mode = PAGE_MODE_3_LEVEL;
	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	if (!domain->pt_root)
		goto out_free;

	dom->priv = domain;

	return 0;

out_free:
	protection_domain_free(domain);

	return -ENOMEM;
}

static void amd_iommu_domain_destroy(struct iommu_domain *dom)
{
	struct protection_domain *domain = dom->priv;

	if (!domain)
		return;

	if (domain->dev_cnt > 0)
		cleanup_domain(domain);

	BUG_ON(domain->dev_cnt != 0);

	free_pagetable(domain);

	domain_id_free(domain->id);

	kfree(domain);

	dom->priv = NULL;
}

static void amd_iommu_detach_device(struct iommu_domain *dom,
				    struct device *dev)
{
	struct protection_domain *domain = dom->priv;
	struct amd_iommu *iommu;
	struct pci_dev *pdev;
	u16 devid;

	if (dev->bus != &pci_bus_type)
		return;

	pdev = to_pci_dev(dev);

	devid = calc_devid(pdev->bus->number, pdev->devfn);

	if (devid > 0)
		detach_device(domain, devid);

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return;

	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_completion_wait(iommu);
}

static int amd_iommu_attach_device(struct iommu_domain *dom,
				   struct device *dev)
{
	struct protection_domain *domain = dom->priv;
	struct protection_domain *old_domain;
	struct amd_iommu *iommu;
	struct pci_dev *pdev;
	u16 devid;

	if (dev->bus != &pci_bus_type)
		return -EINVAL;

	pdev = to_pci_dev(dev);

	devid = calc_devid(pdev->bus->number, pdev->devfn);

	if (devid >= amd_iommu_last_bdf ||
	    devid != amd_iommu_alias_table[devid])
		return -EINVAL;

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return -EINVAL;

	old_domain = domain_for_device(devid);
	if (old_domain)
		detach_device(old_domain, devid);

	attach_device(iommu, domain, devid);

	iommu_completion_wait(iommu);

	return 0;
}
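
/*
 * Sketch of the generic IOMMU-API path into the two functions above,
 * assuming a user like KVM device assignment and the linux/iommu.h
 * interface of this kernel:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *
 *	if (!dom || iommu_attach_device(dom, &pdev->dev))
 *		goto error;
 *
 * iommu_attach_device() dispatches through the amd_iommu_ops table
 * registered in amd_iommu_init_dma_ops() above.
 */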

static int amd_iommu_map_range(struct iommu_domain *dom,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int iommu_prot)
{
	struct protection_domain *domain = dom->priv;
	unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= IOMMU_PROT_IR;
	if (iommu_prot & IOMMU_WRITE)
		prot |= IOMMU_PROT_IW;

	iova &= PAGE_MASK;
	paddr &= PAGE_MASK;

	for (i = 0; i < npages; ++i) {
		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
		if (ret)
			return ret;

		iova += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	return 0;
}

static void amd_iommu_unmap_range(struct iommu_domain *dom,
				  unsigned long iova, size_t size)
{
	struct protection_domain *domain = dom->priv;
	unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE);

	iova &= PAGE_MASK;

	for (i = 0; i < npages; ++i) {
		iommu_unmap_page(domain, iova, PM_MAP_4k);
		iova += PAGE_SIZE;
	}

	iommu_flush_tlb_pde(domain);
}

static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
					  unsigned long iova)
{
	struct protection_domain *domain = dom->priv;
	unsigned long offset = iova & ~PAGE_MASK;
	phys_addr_t paddr;
	u64 *pte;

	pte = fetch_pte(domain, iova, PM_MAP_4k);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	paddr = *pte & IOMMU_PAGE_MASK;
	paddr |= offset;

	return paddr;
}

static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static struct iommu_ops amd_iommu_ops = {
	.domain_init = amd_iommu_domain_init,
	.domain_destroy = amd_iommu_domain_destroy,
	.attach_dev = amd_iommu_attach_device,
	.detach_dev = amd_iommu_detach_device,
	.map = amd_iommu_map_range,
	.unmap = amd_iommu_unmap_range,
	.iova_to_phys = amd_iommu_iova_to_phys,
	.domain_has_cap = amd_iommu_domain_has_cap,
};
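
/*
 * The matching map/unmap calls from the generic API, again assuming the
 * linux/iommu.h wrappers of this kernel:
 *
 *	iommu_map_range(dom, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
 *	phys = iommu_iova_to_phys(dom, iova);
 *	iommu_unmap_range(dom, iova, size);
 *
 * These dispatch via the .map, .iova_to_phys and .unmap callbacks in
 * amd_iommu_ops above.
 */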

/*****************************************************************************
 *
 * The next functions do a basic initialization of IOMMU for pass through
 * mode
 *
 * In passthrough mode the IOMMU is initialized and enabled but not used for
 * DMA-API translation.
 *
 *****************************************************************************/

int __init amd_iommu_init_passthrough(void)
{
	struct pci_dev *dev = NULL;
	u16 devid, devid2;

	/* allocate passthrough domain */
	pt_domain = protection_domain_alloc();
	if (!pt_domain)
		return -ENOMEM;

	pt_domain->mode |= PAGE_MODE_NONE;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		struct amd_iommu *iommu;

		devid = calc_devid(dev->bus->number, dev->devfn);
		if (devid > amd_iommu_last_bdf)
			continue;

		devid2 = amd_iommu_alias_table[devid];

		iommu = amd_iommu_rlookup_table[devid2];
		if (!iommu)
			continue;

		__attach_device(iommu, pt_domain, devid);
		__attach_device(iommu, pt_domain, devid2);
	}

	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");

	return 0;
}