/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>

static ssize_t show_iommu_group(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        unsigned int groupid;

        if (iommu_device_group(dev, &groupid))
                return 0;

        return sprintf(buf, "%u", groupid);
}
static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);

static int add_iommu_group(struct device *dev, void *data)
{
        unsigned int groupid;

        if (iommu_device_group(dev, &groupid) == 0)
                return device_create_file(dev, &dev_attr_iommu_group);

        return 0;
}

static int remove_iommu_group(struct device *dev)
{
        unsigned int groupid;

        if (iommu_device_group(dev, &groupid) == 0)
                device_remove_file(dev, &dev_attr_iommu_group);

        return 0;
}

static int iommu_device_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct device *dev = data;

        if (action == BUS_NOTIFY_ADD_DEVICE)
                return add_iommu_group(dev, NULL);
        else if (action == BUS_NOTIFY_DEL_DEVICE)
                return remove_iommu_group(dev);

        return 0;
}

static struct notifier_block iommu_device_nb = {
        .notifier_call = iommu_device_notifier,
};

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
        bus_register_notifier(bus, &iommu_device_nb);
        bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: the bus type
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
        if (bus->iommu_ops != NULL)
                return -EBUSY;

        bus->iommu_ops = ops;

        /* Do IOMMU specific setup for this bus-type */
        iommu_bus_init(bus, ops);

        return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

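/*
 * Usage sketch (illustrative, not part of the original file): an IOMMU
 * driver typically registers its ops once at init time.  The names
 * my_iommu_ops, my_domain_init etc. below are hypothetical.
 *
 *      static struct iommu_ops my_iommu_ops = {
 *              .domain_init = my_domain_init,
 *              .attach_dev  = my_attach_dev,
 *              .map         = my_map,
 *              .unmap       = my_unmap,
 *      };
 *
 *      static int __init my_iommu_init(void)
 *      {
 *              return bus_set_iommu(&platform_bus_type, &my_iommu_ops);
 *      }
 */
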
bool iommu_present(struct bus_type *bus)
{
        return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 *
 * This function should be used by IOMMU users that want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
                             iommu_fault_handler_t handler)
{
        BUG_ON(!domain);

        domain->handler = handler;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

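/*
 * Handler sketch (illustrative): a handler matching the
 * iommu_fault_handler_t signature declared in <linux/iommu.h> in this
 * tree.  Per the comment above, it should return 0 only if the fault was
 * actually resolved; this hypothetical example just logs and reports an
 * error.
 *
 *      static int my_fault_handler(struct iommu_domain *domain,
 *                                  struct device *dev, unsigned long iova,
 *                                  int flags)
 *      {
 *              dev_err(dev, "iommu fault at iova 0x%lx, flags 0x%x\n",
 *                      iova, flags);
 *              return -ENOSYS;    (nothing was fixed up)
 *      }
 *
 *      iommu_set_fault_handler(domain, my_fault_handler);
 */
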
struct iommu_domain *iommu_domain_alloc(struct bus_type *bus, int flags)
{
        struct iommu_domain *domain;
        int ret;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        domain->ops = bus->iommu_ops;

        ret = domain->ops->domain_init(domain, flags);
        if (ret)
                goto out_free;

        return domain;

out_free:
        kfree(domain);

        return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
        if (likely(domain->ops->domain_destroy != NULL))
                domain->ops->domain_destroy(domain);

        kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->detach_dev == NULL))
                return;

        domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

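/*
 * Lifecycle sketch (illustrative): a typical caller allocates a domain on
 * the device's bus, attaches the device, uses the mapping calls, then
 * tears everything down in reverse order.  Error handling is abbreviated
 * and 'dev' is assumed to be a valid struct device.
 *
 *      struct iommu_domain *domain;
 *
 *      domain = iommu_domain_alloc(dev->bus, 0);
 *      if (!domain)
 *              return -ENOMEM;
 *
 *      if (iommu_attach_device(domain, dev))
 *              goto err_free;
 *
 *      ... iommu_map() / iommu_unmap() / iommu_iova_to_phys() ...
 *
 *      iommu_detach_device(domain, dev);
 * err_free:
 *      iommu_domain_free(domain);
 */
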
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
                               unsigned long iova)
{
        if (unlikely(domain->ops->iova_to_phys == NULL))
                return 0;

        return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
                         unsigned long cap)
{
        if (unlikely(domain->ops->domain_has_cap == NULL))
                return 0;

        return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
{
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        int ret = 0;

        if (unlikely(domain->ops->map == NULL))
                return -ENODEV;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        /*
         * both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
                       "0x%x\n", iova, (unsigned long)paddr,
                       (unsigned long)size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
                 (unsigned long)paddr, (unsigned long)size);

        while (size) {
                unsigned long pgsize, addr_merge = iova | paddr;
                unsigned int pgsize_idx;

                /* Max page size that still fits into 'size' */
                pgsize_idx = __fls(size);

                /* need to consider alignment requirements? */
                if (likely(addr_merge)) {
                        /* Max page size allowed by both iova and paddr */
                        unsigned int align_pgsize_idx = __ffs(addr_merge);

                        pgsize_idx = min(pgsize_idx, align_pgsize_idx);
                }

                /* build a mask of acceptable page sizes */
                pgsize = (1UL << (pgsize_idx + 1)) - 1;

                /* throw away page sizes not supported by the hardware */
                pgsize &= domain->ops->pgsize_bitmap;

                /* make sure we're still sane */
                BUG_ON(!pgsize);

                /* pick the biggest page */
                pgsize_idx = __fls(pgsize);
                pgsize = 1UL << pgsize_idx;

                pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
                         (unsigned long)paddr, pgsize);

                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

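/*
 * Worked example (illustrative) of the page-size selection above, assuming
 * a hypothetical pgsize_bitmap of 0x00101000 (4K and 1M pages) and a call
 * iommu_map(domain, 0x100000, 0x500000, 0x102000, prot):
 *
 *   - min_pagesz is 4K, and iova, paddr and size are all 4K-aligned.
 *   - 1st iteration: __fls(0x102000) = 20 and __ffs(0x100000 | 0x500000)
 *     = 20, so the mask ((1UL << 21) - 1) & 0x00101000 selects a 1M page.
 *   - 2nd/3rd iterations: 0x2000 bytes remain and __fls(0x2000) = 13, so
 *     the mask 0x3fff & 0x00101000 leaves only 4K; two 4K pages are
 *     mapped and the loop terminates with size == 0.
 *
 * If any ops->map() call fails, the partially created mapping is torn
 * down again via iommu_unmap() before the error is returned.
 */
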
size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        unsigned int min_pagesz;

        if (unlikely(domain->ops->unmap == NULL))
                return -ENODEV;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        /*
         * The virtual address, as well as the size of the mapping, must be
         * aligned (at least) to the size of the smallest page supported
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
                       iova, (unsigned long)size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
                 (unsigned long)size);

        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
                size_t left = size - unmapped;

                unmapped_page = domain->ops->unmap(domain, iova, left);
                if (!unmapped_page)
                        break;

                pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
                         (unsigned long)unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

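/*
 * Caller sketch (illustrative): because iommu_unmap() returns the number
 * of bytes actually unmapped rather than an error code on partial
 * progress, callers that need the whole range gone should compare the
 * return value against the requested size.
 *
 *      size_t unmapped = iommu_unmap(domain, iova, size);
 *
 *      if (unmapped < size)
 *              pr_warn("hit an unmapped hole after %zu bytes\n", unmapped);
 */
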
int iommu_map_range(struct iommu_domain *domain, unsigned int iova,
                    struct scatterlist *sg, unsigned int len, int prot)
{
        if (unlikely(domain->ops->map_range == NULL))
                return -ENODEV;

        BUG_ON(iova & (~PAGE_MASK));

        return domain->ops->map_range(domain, iova, sg, len, prot);
}
EXPORT_SYMBOL_GPL(iommu_map_range);

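/*
 * Usage sketch (illustrative): iommu_map_range() and iommu_unmap_range()
 * are scatterlist-based helpers carried in this tree.  Assuming 'len' is
 * the total length in bytes, a caller might map two discontiguous pages
 * at a page-aligned iova like this; page0 and page1 are hypothetical
 * struct page pointers.
 *
 *      struct scatterlist sg[2];
 *
 *      sg_init_table(sg, 2);
 *      sg_set_page(&sg[0], page0, PAGE_SIZE, 0);
 *      sg_set_page(&sg[1], page1, PAGE_SIZE, 0);
 *
 *      ret = iommu_map_range(domain, iova, sg, 2 * PAGE_SIZE, prot);
 *      ...
 *      ret = iommu_unmap_range(domain, iova, 2 * PAGE_SIZE);
 */
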
int iommu_unmap_range(struct iommu_domain *domain, unsigned int iova,
                      unsigned int len)
{
        if (unlikely(domain->ops->unmap_range == NULL))
                return -ENODEV;

        BUG_ON(iova & (~PAGE_MASK));

        return domain->ops->unmap_range(domain, iova, len);
}
EXPORT_SYMBOL_GPL(iommu_unmap_range);

phys_addr_t iommu_get_pt_base_addr(struct iommu_domain *domain)
{
        if (unlikely(domain->ops->get_pt_base_addr == NULL))
                return 0;

        return domain->ops->get_pt_base_addr(domain);
}
EXPORT_SYMBOL_GPL(iommu_get_pt_base_addr);

int iommu_device_group(struct device *dev, unsigned int *groupid)
{
        if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
                return dev->bus->iommu_ops->device_group(dev, groupid);

        return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_device_group);