/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/			  mapping		iommu_		page
 *    | da	pa	va	(d)-(p)-(v)		function	type
 *  ---------------------------------------------------------------------------
 *  1 | c	c	c	 1 - 1 - 1	  _kmap() / _kunmap()	s
 *  2 | c	c,a	c	 1 - 1 - 1	_kmalloc()/ _kfree()	s
 *  3 | c	d	c	 1 - n - 1	  _vmap() / _vunmap()	s
 *  4 | c	d,a	c	 1 - n - 1	_vmalloc()/ _vfree()	n*
 *
 *
 *	'iova':	device iommu virtual address
 *	'da':	alias of 'iova'
 *	'pa':	physical address
 *	'va':	mpu virtual address
 *
 *	'c':	contiguous memory area
 *	'd':	discontiguous memory area
 *	'a':	anonymous memory allocation
 *	'()':	optional feature
 *
 *	'n':	a normal page (4KB) size is used.
 *	's':	multiple iommu superpage sizes (16MB, 1MB, 64KB, 4KB) are used.
 *
 *	'*':	not supported yet, but feasible.
 */
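
/*
 * Illustrative sketch only, not part of this file's API: a client driver
 * holding a 'struct iommu *obj' (e.g. obtained elsewhere via iommu_get())
 * would use pattern 2 above roughly like this.  The function name here is
 * hypothetical.
 */
static int __maybe_unused iovmm_example_pattern2(struct iommu *obj)
{
	u32 da;
	void *va;

	/* (d): allocate a physically contiguous buffer; da == 0 lets
	 * iovmm pick an anonymous device address */
	da = iommu_kmalloc(obj, 0, SZ_64K, 0);
	if (IS_ERR_VALUE(da))
		return (int)da;

	va = da_to_va(obj, da);		/* (v): mpu-side view of the buffer */
	if (va)
		memset(va, 0, SZ_64K);	/* fill via 'va', hand 'da' to device */

	iommu_kfree(obj, da);		/* tears down the 'da' <-> 'pa' map */
	return 0;
}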

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg_dma_len(sg);

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned int sgtable_nents(size_t bytes)
{
	int i;
	unsigned int nr_entries;
	const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	nr_entries = 0;
	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
		if (bytes >= pagesize[i]) {
			nr_entries += (bytes / pagesize[i]);
			bytes %= pagesize[i];
		}
	}
	BUG_ON(bytes);

	return nr_entries;
}
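
/*
 * Worked example for sgtable_nents() (illustrative): bytes == SZ_16M +
 * SZ_1M + SZ_64K decomposes greedily into one 16MB, one 1MB and one 64KB
 * superpage, i.e. 3 entries, where a flat 4KB split would need 4368.
 */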

/* allocate and initialize sg_table header(a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	/* FIXME: IOVMF_DA_FIXED should support 'superpages' */
	if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
		nr_entries = sgtable_nents(bytes);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);	/* don't leak the header on failure */
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header(a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	pr_debug("%s: sgt:%p\n", __func__, sgt);

	sg_free_table(sgt);
	kfree(sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * find_iovm_area  -  find iovma which includes @da
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Find the existing iovma that includes @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * Find a hole (free area) which fits the requested address and length
 * in the iovma list, and return the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (flags & IOVMF_DA_ANON) {
		/*
		 * Reserve the first page for NULL
		 */
		start = PAGE_SIZE;
		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if ((prev_end <= start) && (start + bytes < tmp->da_start))
			goto found;

		if (flags & IOVMF_DA_ANON)
			start = roundup(tmp->da_end, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (ULONG_MAX - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
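
/*
 * Illustrative first-fit example for alloc_iovm_area(): with existing
 * iovmas [4KB, 1MB) and [2MB, 3MB), an anonymous (IOVMF_DA_ANON) request
 * for 512KB is bumped past the first area to start == 1MB, where the
 * [1MB, 2MB) hole is large enough, so the new iovma becomes [1MB, 1.5MB).
 */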

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * This is not strictly necessary; it exists only for symmetry
	 * and readability.
	 */
	BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
{
	unsigned int i;
	struct scatterlist *sg;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = iopgsz_max(len);

		BUG_ON(!iopgsz_ok(bytes));

		sg_set_buf(sg, phys_to_virt(pa), bytes);
		/*
		 * 'pa' is contiguous (linear).
		 */
		pa += bytes;
		len -= bytes;
	}
	BUG_ON(len);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
	/*
	 * This is not strictly necessary; it exists only for symmetry
	 * and readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!obj || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		int pgsz;
		size_t bytes;
		struct iotlb_entry e;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		flags &= ~IOVMF_PGSZ_MASK;
		pgsz = bytes_to_iopgsz(bytes);
		if (pgsz < 0) {
			err = -EINVAL;	/* don't return uninitialized 'err' */
			goto err_out;
		}
		flags |= pgsz;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		iotlb_init_entry(&e, da, pa, flags);
		err = iopgtable_store_entry(obj, &e);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, da);

		BUG_ON(!iopgsz_ok(bytes));

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;

	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, start);
		if (bytes == 0)
			bytes = PAGE_SIZE;
		else
			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	int err;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	err = map_iovm_area(obj, new, sgt, new->flags);
	if (err)
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates a 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
		 u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
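
/*
 * Minimal usage sketch for iommu_vmap()/iommu_vunmap(), illustrative only:
 * 'obj' and a pre-built, io-page-aligned 'sgt' are assumed to come from the
 * client driver; the function name is hypothetical.
 */
static u32 __maybe_unused iovmm_example_vmap(struct iommu *obj,
					     const struct sg_table *sgt)
{
	u32 da;

	/* da == 0 requests an anonymous device address (IOVMF_DA_ANON) */
	da = iommu_vmap(obj, 0, sgt, 0);
	if (IS_ERR_VALUE(da))
		return da;

	/* ... let the device access the buffer through 'da' ... */

	iommu_vunmap(obj, da);	/* hands 'sgt' back to its owner to free */
	return 0;
}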

/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated by the caller before 'iommu_vmap()' is called.
	 * Just return 'sgt' to the caller to free.
	 */
	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);

/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);
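
/*
 * Minimal usage sketch for iommu_vmalloc()/iommu_vfree(), illustrative
 * only; 'obj' is assumed to come from the client driver and the function
 * name is hypothetical.  This is pattern 4 from the table at the top of
 * this file: discontiguous anonymous pages behind a contiguous 'da'.
 */
static u32 __maybe_unused iovmm_example_vmalloc(struct iommu *obj,
						size_t bytes)
{
	u32 da;

	da = iommu_vmalloc(obj, 0, bytes, 0);
	if (IS_ERR_VALUE(da))
		return da;

	/*
	 * The backing pages are discontiguous vmalloc() pages; both 'da'
	 * (device side) and the va from da_to_va() (mpu side) are
	 * nevertheless contiguous.
	 */

	iommu_vfree(obj, da);
	return 0;
}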

/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
			size_t bytes, u32 flags)
{
	struct sg_table *sgt;

	sgt = sgtable_alloc(bytes, flags);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	sgtable_fill_kmalloc(sgt, pa, bytes);

	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
	}

	return da;
}

/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @pa:		contiguous physical memory
 * @bytes:	mapping size
 * @flags:	iovma and page property
 *
 * Creates a 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
		 u32 flags)
{
	void *va;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = ioremap(pa, bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
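
/*
 * Minimal usage sketch for iommu_kmap()/iommu_kunmap(), illustrative only:
 * expose an already-existing physically contiguous region, e.g. an on-chip
 * memory at 'pa', at a fixed device address.  The function name and the
 * 0x11000000 device address are hypothetical.
 */
static u32 __maybe_unused iovmm_example_kmap(struct iommu *obj, u32 pa)
{
	u32 da;

	/* a non-zero da requests IOVMF_DA_FIXED at exactly this address */
	da = iommu_kmap(obj, 0x11000000, pa, SZ_1M, 0);
	if (IS_ERR_VALUE(da))
		return da;

	/* ... the device sees the region at 'da' ... */

	iommu_kunmap(obj, da);
	return 0;
}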

/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	typedef void (*func_t)(const void *);

	sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:		contiguous iommu virtual memory
 * @bytes:	bytes for allocation
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	u32 pa;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
	if (!va)
		return -ENOMEM;
	pa = virt_to_phys(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);

/**
 * iommu_kfree  -  release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj:	objective iommu
 * @da:		iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");