/* Copyright (c) 2010, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
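
/*
 * Rough overview (inferred from the implementation below, not from an
 * external spec): the Virtual Contiguous Memory (VCM) layer manages
 * device virtual address spaces.  A struct vcm owns a gen_pool of
 * device addresses and, for VCM_DEVICE, an IOMMU domain.
 * vcm_reserve() carves a virtually contiguous region out of the space,
 * vcm_phys_alloc() gathers physical chunks, vcm_back() maps the chunks
 * into the reservation, and vcm_assoc()/vcm_activate() attach the
 * address space to a device.
 */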

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vcm_mm.h>
#include <linux/vcm.h>
#include <linux/vcm_alloc.h>
#include <linux/vcm_types.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>	/* gen_pool_* */

#include <asm/page.h>
#include <asm/sizes.h>

#include <linux/iommu.h>

/* alloc_vm_area */
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#define ONE_TO_ONE_CHK 1

#define vcm_err(a, ...)							\
	pr_err("ERROR %s %i " a, __func__, __LINE__, ##__VA_ARGS__)

static unsigned int smmu_map_sizes[4] = {SZ_16M, SZ_1M, SZ_64K, SZ_4K};

static phys_addr_t *bootmem_cont;
static int cont_sz;
static struct vcm *cont_vcm_id;
static struct phys_chunk *cont_phys_chunk;

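/*
 * One global lock serializes every VCM operation in this file: each
 * public entry point takes vcmlock with interrupts disabled, so none
 * of the list or pool manipulation below needs finer-grained locking.
 */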
DEFINE_SPINLOCK(vcmlock);

/* Leaving this in for now to keep the API compatible.
 * This will disappear.
 */
phys_addr_t vcm_get_dev_addr(struct res *res)
{
	if (!res) {
		vcm_err("NULL res\n");
		return -EINVAL;
	}
	return res->dev_addr;
}

static int vcm_no_res(struct vcm *vcm)
{
	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	return list_empty(&vcm->res_head);
fail:
	return -EINVAL;
}

static int vcm_no_assoc(struct vcm *vcm)
{
	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	return list_empty(&vcm->assoc_head);
fail:
	return -EINVAL;
}

static int vcm_all_activated(struct vcm *vcm)
{
	struct avcm *avcm;

	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	list_for_each_entry(avcm, &vcm->assoc_head, assoc_elm)
		if (!avcm->is_active)
			return 0;

	return 1;
fail:
	return -EINVAL;
}

static void vcm_destroy_common(struct vcm *vcm)
{
	if (!vcm) {
		vcm_err("NULL vcm\n");
		return;
	}

	memset(vcm, 0, sizeof(*vcm));
	kfree(vcm);
}

static struct vcm *vcm_create_common(void)
{
	struct vcm *vcm = NULL;

	vcm = kzalloc(sizeof(*vcm), GFP_KERNEL);
	if (!vcm) {
		vcm_err("kzalloc(%zu, GFP_KERNEL) ret 0\n",
			sizeof(*vcm));
		goto fail;
	}

	INIT_LIST_HEAD(&vcm->res_head);
	INIT_LIST_HEAD(&vcm->assoc_head);

	return vcm;

fail:
	return NULL;
}


static int vcm_create_pool(struct vcm *vcm, unsigned long start_addr,
			   size_t len)
{
	int ret = 0;

	if (!vcm) {
		vcm_err("NULL vcm\n");
		/* don't take the fail path: vcm->pool can't be examined */
		return -EINVAL;
	}

	vcm->start_addr = start_addr;
	vcm->len = len;

	vcm->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!vcm->pool) {
		vcm_err("gen_pool_create(%x, -1) ret 0\n", PAGE_SHIFT);
		ret = -EINVAL;
		goto fail;
	}

	ret = gen_pool_add(vcm->pool, start_addr, len, -1);
	if (ret) {
		vcm_err("gen_pool_add(%p, %p, %zu, -1) ret %i\n", vcm->pool,
			(void *) start_addr, len, ret);
		goto fail;
	}

	vcm->domain = iommu_domain_alloc();
	if (!vcm->domain) {
		vcm_err("Could not allocate domain\n");
		ret = -ENOMEM;
		goto fail;
	}

fail:
	if (ret && vcm->pool) {
		gen_pool_destroy(vcm->pool);
		vcm->pool = NULL;
	}

	return ret;
}


static struct vcm *vcm_create_flagged(int flag, unsigned long start_addr,
				      size_t len)
{
	int ret = 0;
	struct vcm *vcm = NULL;

	vcm = vcm_create_common();
	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	/* special one-to-one mapping case */
	if ((flag & ONE_TO_ONE_CHK) &&
	    bootmem_cont &&
	    start_addr == (size_t) bootmem_cont &&
	    len == cont_sz) {
		vcm->type = VCM_ONE_TO_ONE;
	} else {
		ret = vcm_create_pool(vcm, start_addr, len);
		vcm->type = VCM_DEVICE;
	}

	if (ret) {
		vcm_err("vcm_create_pool(%p, %p, %zu) ret %i\n", vcm,
			(void *) start_addr, len, ret);
		goto fail2;
	}

	return vcm;

fail2:
	vcm_destroy_common(vcm);
fail:
	return NULL;
}

struct vcm *vcm_create(unsigned long start_addr, size_t len)
{
	unsigned long flags;
	struct vcm *vcm;

	spin_lock_irqsave(&vcmlock, flags);
	vcm = vcm_create_flagged(ONE_TO_ONE_CHK, start_addr, len);
	spin_unlock_irqrestore(&vcmlock, flags);
	return vcm;
}
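
/*
 * Typical call sequence, as a sketch (illustrative only; error
 * handling elided, addresses and sizes made up, and dev is whatever
 * struct device pointer the caller already has):
 *
 *	struct vcm *vcm = vcm_create(0x10000000, SZ_16M);
 *	struct res *res = vcm_reserve(vcm, SZ_1M, 0);
 *	struct physmem *pm = vcm_phys_alloc(VCM_MEMTYPE_0, SZ_1M, 0);
 *	struct avcm *avcm = vcm_assoc(vcm, dev, 0);
 *
 *	vcm_activate(avcm);
 *	vcm_back(res, pm);
 *	... program the device with vcm_get_dev_addr(res) ...
 *	vcm_unback(res);
 *	vcm_deactivate(avcm);
 *	vcm_deassoc(avcm);
 *	vcm_unreserve(res);
 *	vcm_phys_free(pm);
 *	vcm_free(vcm);
 *
 * Note the ordering constraints enforced below: backing and unbacking
 * require every association on the vcm to be activated.
 */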


static int ext_vcm_id_valid(size_t ext_vcm_id)
{
	return ((ext_vcm_id == VCM_PREBUILT_KERNEL) ||
		(ext_vcm_id == VCM_PREBUILT_USER));
}


struct vcm *vcm_create_from_prebuilt(size_t ext_vcm_id)
{
	unsigned long flags;
	struct vcm *vcm = NULL;

	spin_lock_irqsave(&vcmlock, flags);

	if (!ext_vcm_id_valid(ext_vcm_id)) {
		vcm_err("ext_vcm_id_valid(%zu) ret 0\n", ext_vcm_id);
		goto fail;
	}

	vcm = vcm_create_common();
	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	if (ext_vcm_id == VCM_PREBUILT_KERNEL)
		vcm->type = VCM_EXT_KERNEL;
	else if (ext_vcm_id == VCM_PREBUILT_USER)
		vcm->type = VCM_EXT_USER;
	else {
		vcm_err("UNREACHABLE: ext_vcm_id is illegal\n");
		goto fail_free;
	}

	/* TODO: set kernel and userspace start_addr and len, if this
	 * makes sense */

	spin_unlock_irqrestore(&vcmlock, flags);
	return vcm;

fail_free:
	vcm_destroy_common(vcm);
fail:
	spin_unlock_irqrestore(&vcmlock, flags);
	return NULL;
}


struct vcm *vcm_clone(struct vcm *vcm)
{
	return NULL;
}


/* No lock needed, vcm->start_addr is never updated after creation */
size_t vcm_get_start_addr(struct vcm *vcm)
{
	if (!vcm) {
		vcm_err("NULL vcm\n");
		return 1;
	}

	return vcm->start_addr;
}


/* No lock needed, vcm->len is never updated after creation */
size_t vcm_get_len(struct vcm *vcm)
{
	if (!vcm) {
		vcm_err("NULL vcm\n");
		return 0;
	}

	return vcm->len;
}


static int vcm_free_common_rule(struct vcm *vcm)
{
	int ret;

	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	ret = vcm_no_res(vcm);
	if (!ret) {
		vcm_err("vcm_no_res(%p) ret 0\n", vcm);
		goto fail_busy;
	}

	if (ret == -EINVAL) {
		vcm_err("vcm_no_res(%p) ret -EINVAL\n", vcm);
		goto fail;
	}

	ret = vcm_no_assoc(vcm);
	if (!ret) {
		vcm_err("vcm_no_assoc(%p) ret 0\n", vcm);
		goto fail_busy;
	}

	if (ret == -EINVAL) {
		vcm_err("vcm_no_assoc(%p) ret -EINVAL\n", vcm);
		goto fail;
	}

	return 0;

fail_busy:
	return -EBUSY;
fail:
	return -EINVAL;
}


static int vcm_free_pool_rule(struct vcm *vcm)
{
	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	/* A vcm always has a valid pool; if this one doesn't, the
	 * handle we were given is probably invalid, so don't free it.
	 */
	if (!vcm->pool) {
		vcm_err("NULL vcm->pool\n");
		goto fail;
	}

	return 0;

fail:
	return -EINVAL;
}


static void vcm_free_common(struct vcm *vcm)
{
	memset(vcm, 0, sizeof(*vcm));

	kfree(vcm);
}


static int vcm_free_pool(struct vcm *vcm)
{
	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	gen_pool_destroy(vcm->pool);

	return 0;

fail:
	return -EINVAL;
}


static int __vcm_free(struct vcm *vcm)
{
	int ret;

	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	ret = vcm_free_common_rule(vcm);
	if (ret != 0) {
		vcm_err("vcm_free_common_rule(%p) ret %i\n", vcm, ret);
		goto fail;
	}

	if (vcm->type == VCM_DEVICE) {
		ret = vcm_free_pool_rule(vcm);
		if (ret != 0) {
			vcm_err("vcm_free_pool_rule(%p) ret %i\n",
				(void *) vcm, ret);
			goto fail;
		}
		if (vcm->domain)
			iommu_domain_free(vcm->domain);

		vcm->domain = NULL;
		ret = vcm_free_pool(vcm);
		if (ret != 0) {
			vcm_err("vcm_free_pool(%p) ret %i", (void *) vcm, ret);
			goto fail;
		}
	}

	vcm_free_common(vcm);

	return 0;

fail:
	return -EINVAL;
}

int vcm_free(struct vcm *vcm)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vcmlock, flags);
	ret = __vcm_free(vcm);
	spin_unlock_irqrestore(&vcmlock, flags);

	return ret;
}


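/*
 * Reservation alignment, by example (describing __vcm_reserve below):
 * the field (attr >> VCM_ALIGN_SHIFT) & VCM_ALIGN_MASK encodes a log2
 * alignment, so a value of 9 requests 1 << 9 = 512-byte alignment.  A
 * value of 0 defaults to the largest SMMU block size that fits in len:
 * len == SZ_2M picks SZ_1M, len == SZ_32K falls through to SZ_4K.
 * aligned_len = alignment_req + len overallocates so that an aligned
 * dev_addr always fits inside the gen_pool allocation.
 */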
static struct res *__vcm_reserve(struct vcm *vcm, size_t len, u32 attr)
{
	struct res *res = NULL;
	int align_attr = 0, i = 0;

	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	if (len == 0) {
		vcm_err("len is 0\n");
		goto fail;
	}

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		vcm_err("kzalloc(%zu, GFP_KERNEL) ret 0\n", sizeof(*res));
		goto fail;
	}

	align_attr = (attr >> VCM_ALIGN_SHIFT) & VCM_ALIGN_MASK;

	if (align_attr >= 32) {
		vcm_err("Invalid alignment attribute: %d\n", align_attr);
		goto fail2;
	}

	INIT_LIST_HEAD(&res->res_elm);
	res->vcm = vcm;
	res->len = len;
	res->attr = attr;
	res->alignment_req = smmu_map_sizes[ARRAY_SIZE(smmu_map_sizes) - 1];

	if (align_attr == 0) {
		for (i = 0; i < ARRAY_SIZE(smmu_map_sizes); i++)
			if (len / smmu_map_sizes[i]) {
				res->alignment_req = smmu_map_sizes[i];
				break;
			}
	} else
		res->alignment_req = 1 << align_attr;

	res->aligned_len = res->alignment_req + len;

	switch (vcm->type) {
	case VCM_DEVICE:
		/* should always be non-zero */
		if (!vcm->pool) {
			vcm_err("NULL vcm->pool\n");
			goto fail2;
		}

		res->ptr = gen_pool_alloc(vcm->pool, res->aligned_len);
		if (!res->ptr) {
			vcm_err("gen_pool_alloc(%p, %zu) ret 0\n",
				vcm->pool, res->aligned_len);
			goto fail2;
		}

		/* Calculate alignment... this will all change anyway */
		res->dev_addr = res->ptr +
			(res->alignment_req -
			 (res->ptr & (res->alignment_req - 1)));

		break;
	case VCM_EXT_KERNEL:
		res->vm_area = alloc_vm_area(res->aligned_len);
		res->mapped = 0; /* be explicit */
		if (!res->vm_area) {
			vcm_err("NULL res->vm_area\n");
			goto fail2;
		}

		res->dev_addr = (size_t) res->vm_area->addr +
			(res->alignment_req -
			 ((size_t) res->vm_area->addr &
			  (res->alignment_req - 1)));

		break;
	case VCM_ONE_TO_ONE:
		break;
	default:
		vcm_err("%i is an invalid vcm->type\n", vcm->type);
		goto fail2;
	}

	list_add_tail(&res->res_elm, &vcm->res_head);

	return res;

fail2:
	kfree(res);
fail:
	return NULL;
}


struct res *vcm_reserve(struct vcm *vcm, size_t len, u32 attr)
{
	unsigned long flags;
	struct res *res;

	spin_lock_irqsave(&vcmlock, flags);
	res = __vcm_reserve(vcm, len, attr);
	spin_unlock_irqrestore(&vcmlock, flags);

	return res;
}


struct res *vcm_reserve_at(enum memtarget_t memtarget, struct vcm *vcm,
			   size_t len, u32 attr)
{
	return NULL;
}

static int __vcm_unreserve(struct res *res)
{
	struct vcm *vcm;

	if (!res) {
		vcm_err("NULL res\n");
		goto fail;
	}

	vcm = res->vcm;
	if (!vcm) {
		vcm_err("NULL res->vcm\n");
		goto fail;
	}

	switch (vcm->type) {
	case VCM_DEVICE:
		if (!res->vcm->pool) {
			vcm_err("NULL (res->vcm)->pool\n");
			goto fail;
		}

		/* res->ptr could be zero, this isn't an error */
		gen_pool_free(res->vcm->pool, res->ptr,
			      res->aligned_len);
		break;
	case VCM_EXT_KERNEL:
		if (res->mapped) {
			vcm_err("res->mapped is true\n");
			goto fail;
		}

		/* This may take a little explaining: in the kernel,
		 * vunmap() frees res->vm_area, so if we've called it we
		 * shouldn't call free_vm_area() too.  vcm_unback() sets
		 * res->vm_area to NULL after vunmap() for that reason.
		 */
		if (res->vm_area) {
			free_vm_area(res->vm_area);
			res->vm_area = NULL;
		}

		break;
	case VCM_ONE_TO_ONE:
		break;
	default:
		vcm_err("%i is an invalid vcm->type\n", vcm->type);
		goto fail;
	}

	list_del(&res->res_elm);

	/* be extra careful by clearing the memory before freeing it */
	memset(res, 0, sizeof(*res));

	kfree(res);

	return 0;

fail:
	return -EINVAL;
}


int vcm_unreserve(struct res *res)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vcmlock, flags);
	ret = __vcm_unreserve(res);
	spin_unlock_irqrestore(&vcmlock, flags);

	return ret;
}


/* No lock needed, res->len is never updated after creation */
size_t vcm_get_res_len(struct res *res)
{
	if (!res) {
		vcm_err("res is 0\n");
		return 0;
	}

	return res->len;
}


int vcm_set_res_attr(struct res *res, u32 attr)
{
	return 0;
}


u32 vcm_get_res_attr(struct res *res)
{
	return 0;
}


size_t vcm_get_num_res(struct vcm *vcm)
{
	return 0;
}


struct res *vcm_get_next_res(struct vcm *vcm, struct res *res)
{
	return NULL;
}


size_t vcm_res_copy(struct res *to, size_t to_off, struct res *from, size_t
		    from_off, size_t len)
{
	return 0;
}


size_t vcm_get_min_page_size(void)
{
	return PAGE_SIZE;
}


static int vcm_to_smmu_attr(u32 attr)
{
	int smmu_attr = 0;

	switch (attr & VCM_CACHE_POLICY) {
	case VCM_NOTCACHED:
		smmu_attr = VCM_DEV_ATTR_NONCACHED;
		break;
	case VCM_WB_WA:
		smmu_attr = VCM_DEV_ATTR_CACHED_WB_WA;
		smmu_attr |= VCM_DEV_ATTR_SH;
		break;
	case VCM_WB_NWA:
		smmu_attr = VCM_DEV_ATTR_CACHED_WB_NWA;
		smmu_attr |= VCM_DEV_ATTR_SH;
		break;
	case VCM_WT:
		smmu_attr = VCM_DEV_ATTR_CACHED_WT;
		smmu_attr |= VCM_DEV_ATTR_SH;
		break;
	default:
		return -EINVAL;
	}

	return smmu_attr;
}


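/*
 * Map or unmap [va, va + len) into/out of an IOMMU domain.  The block
 * size is picked once, up front: the largest SMMU block size that va
 * is aligned to and that fits in len.  For example, a 64K-aligned va
 * with len == SZ_128K is processed as two 64K blocks; if len is not a
 * multiple of the chosen block size, the "trying to overmap" check
 * below fails rather than falling back to smaller blocks, so callers
 * pass chunks whose size matches their alignment.
 */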
static int vcm_process_chunk(struct iommu_domain *domain, phys_addr_t pa,
			     unsigned long va, size_t len, u32 attr, int map)
{
	int ret, i, map_order;
	unsigned long map_len = smmu_map_sizes[ARRAY_SIZE(smmu_map_sizes) - 1];

	for (i = 0; i < ARRAY_SIZE(smmu_map_sizes); i++) {
		if (IS_ALIGNED(va, smmu_map_sizes[i]) && len >=
		    smmu_map_sizes[i]) {
			map_len = smmu_map_sizes[i];
			break;
		}
	}

#ifdef VCM_PERF_DEBUG
	if (va & (len - 1))
		pr_warning("Warning! Suboptimal VCM mapping alignment "
			   "va = %p, len = %p. Expect TLB performance "
			   "degradation.\n", (void *) va, (void *) len);
#endif

	map_order = get_order(map_len);

	while (len) {
		if (va & (SZ_4K - 1)) {
			vcm_err("Tried to map w/ align < 4k! va = %08lx\n", va);
			goto fail;
		}

		if (map_len > len) {
			vcm_err("map_len = %lu, len = %zu, trying to overmap\n",
				map_len, len);
			goto fail;
		}

		if (map)
			ret = iommu_map(domain, va, pa, map_len, attr);
		else
			ret = iommu_unmap(domain, va, map_len);

		if (ret) {
			vcm_err("iommu_map/unmap(%p, %p, %p, 0x%x, 0x%x) ret %i,"
				" map = %d", (void *) domain, (void *) pa,
				(void *) va, (int) map_len, attr, ret, map);
			goto fail;
		}

		va += map_len;
		pa += map_len;
		len -= map_len;
	}

	return 0;
fail:
	return -EINVAL;
}

/* TBD: what happens if you vcm_back() an already-backed reservation? */
int vcm_back(struct res *res, struct physmem *physmem)
{
	unsigned long flags;
	struct vcm *vcm;
	struct phys_chunk *chunk;
	size_t va = 0;
	int ret;
	int attr;

	spin_lock_irqsave(&vcmlock, flags);

	if (!res) {
		vcm_err("NULL res\n");
		goto fail;
	}

	vcm = res->vcm;
	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	switch (vcm->type) {
	case VCM_DEVICE:
	case VCM_EXT_KERNEL: /* hack part 1 */
		attr = vcm_to_smmu_attr(res->attr);
		if (attr < 0) {
			vcm_err("Bad SMMU attr\n");
			goto fail;
		}
		break;
	default:
		attr = 0;
		break;
	}

	if (!physmem) {
		vcm_err("NULL physmem\n");
		goto fail;
	}

	if (res->len == 0) {
		vcm_err("res->len is 0\n");
		goto fail;
	}

	if (physmem->len == 0) {
		vcm_err("physmem->len is 0\n");
		goto fail;
	}

	if (res->len != physmem->len) {
		vcm_err("res->len (%zu) != physmem->len (%zu)\n",
			res->len, physmem->len);
		goto fail;
	}

	if (physmem->is_cont) {
		if (!physmem->res) {
			vcm_err("cont physmem->res is 0\n");
			goto fail;
		}
	} else {
		/* fail if no physmem */
		if (list_empty(&physmem->alloc_head.allocated)) {
			vcm_err("no allocated phys memory\n");
			goto fail;
		}
	}

	ret = vcm_no_assoc(res->vcm);
	if (ret == 1) {
		vcm_err("can't back an unassociated VCM\n");
		goto fail;
	}

	if (ret == -EINVAL) {
		vcm_err("vcm_no_assoc() ret -EINVAL\n");
		goto fail;
	}

	ret = vcm_all_activated(res->vcm);
	if (ret == 0) {
		vcm_err("can't back, not all associations are activated\n");
		goto fail_eagain;
	}

	if (ret == -EINVAL) {
		vcm_err("vcm_all_activated() ret -EINVAL\n");
		goto fail;
	}

	va = res->dev_addr;

	list_for_each_entry(chunk, &physmem->alloc_head.allocated,
			    allocated) {
		size_t chunk_size = chunk->size;

		if (!chunk_size) {
			vcm_err("Bad chunk size: %zu\n", chunk_size);
			goto fail;
		}

		switch (vcm->type) {
		case VCM_DEVICE:
		{
			/* map all */
			ret = vcm_process_chunk(vcm->domain, chunk->pa,
						va, chunk_size, attr, 1);
			if (ret != 0) {
				vcm_err("vcm_process_chunk(%p, %p, %p,"
					" 0x%x, 0x%x)"
					" ret %i",
					vcm->domain,
					(void *) chunk->pa,
					(void *) va,
					(int) chunk_size, attr, ret);
				goto fail;
			}
			break;
		}

		case VCM_EXT_KERNEL:
		{
			unsigned int pages_in_chunk = chunk_size / PAGE_SIZE;
			unsigned long loc_va = va;
			unsigned long loc_pa = chunk->pa;

			const struct mem_type *mtype;

			/* TODO: get this based on MEMTYPE */
			mtype = get_mem_type(MT_DEVICE);
			if (!mtype) {
				vcm_err("mtype is 0\n");
				goto fail;
			}

			/* TODO: Map with the same chunk size */
			while (pages_in_chunk--) {
				ret = ioremap_page(loc_va,
						   loc_pa,
						   mtype);
				if (ret != 0) {
					vcm_err("ioremap_page(%p, %p, %p) ret"
						" %i", (void *) loc_va,
						(void *) loc_pa,
						(void *) mtype, ret);
					goto fail;
					/* TODO: handle the weird
					   inter-map case */
				}

				/* hack part 2 */
				/* we're changing the PT entry behind
				 * linux's back
				 */
				ret = cpu_set_attr(loc_va, PAGE_SIZE, attr);
				if (ret != 0) {
					vcm_err("cpu_set_attr(%p, %lu, %x)"
						" ret %i\n",
						(void *) loc_va, PAGE_SIZE,
						attr, ret);
					goto fail;
					/* TODO: handle the weird
					   inter-map case */
				}

				res->mapped = 1;

				loc_va += PAGE_SIZE;
				loc_pa += PAGE_SIZE;
			}

			flush_cache_vmap(va, loc_va);
			break;
		}
		case VCM_ONE_TO_ONE:
			va = chunk->pa;
			break;
		default:
			/* this should never happen */
			goto fail;
		}

		va += chunk_size;
		/* also add res to the allocated chunk list of refs */
	}

	/* note the reservation */
	res->physmem = physmem;

	spin_unlock_irqrestore(&vcmlock, flags);
	return 0;
fail_eagain:
	spin_unlock_irqrestore(&vcmlock, flags);
	return -EAGAIN;
fail:
	spin_unlock_irqrestore(&vcmlock, flags);
	return -EINVAL;
}



int vcm_unback(struct res *res)
{
	unsigned long flags;
	struct vcm *vcm;
	struct physmem *physmem;
	int ret;

	spin_lock_irqsave(&vcmlock, flags);

	if (!res)
		goto fail;

	vcm = res->vcm;
	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	if (!res->physmem) {
		vcm_err("can't unback a non-backed reservation\n");
		goto fail;
	}

	physmem = res->physmem;

	if (list_empty(&physmem->alloc_head.allocated)) {
		vcm_err("physmem allocation is empty\n");
		goto fail;
	}

	ret = vcm_no_assoc(res->vcm);
	if (ret == 1) {
		vcm_err("can't unback an unassociated reservation\n");
		goto fail;
	}

	if (ret == -EINVAL) {
		vcm_err("vcm_no_assoc(%p) ret -EINVAL\n", (void *) res->vcm);
		goto fail;
	}

	ret = vcm_all_activated(res->vcm);
	if (ret == 0) {
		vcm_err("can't unback, not all associations are active\n");
		goto fail_eagain;
	}

	if (ret == -EINVAL) {
		vcm_err("vcm_all_activated(%p) ret -EINVAL\n",
			(void *) res->vcm);
		goto fail;
	}


	switch (vcm->type) {
	case VCM_EXT_KERNEL:
		if (!res->mapped) {
			vcm_err("can't unback an unmapped VCM_EXT_KERNEL"
				" VCM\n");
			goto fail;
		}

		/* vunmap() frees vm_area */
		vunmap(res->vm_area->addr);
		res->vm_area = NULL;

		res->mapped = 0;
		break;

	case VCM_DEVICE:
	{
		struct phys_chunk *chunk;
		size_t va = res->dev_addr;

		list_for_each_entry(chunk, &physmem->alloc_head.allocated,
				    allocated) {
			size_t chunk_size = chunk->size;

			ret = vcm_process_chunk(vcm->domain, 0, va,
						chunk_size, 0, 0);
			if (ret != 0) {
				vcm_err("vcm_process_chunk(%p, %p, 0x%x)"
					" ret %i",
					(void *) vcm->domain,
					(void *) va,
					(int) chunk_size, ret);
				goto fail;
				/* TODO: handle the weird inter-unmap state */
			}

			va += chunk_size;
			/* may do a light unback, depending on the requested
			 * functionality
			 */
		}
		break;
	}

	case VCM_ONE_TO_ONE:
		break;
	default:
		/* this should never happen */
		goto fail;
	}

	/* clear the reservation */
	res->physmem = NULL;

	spin_unlock_irqrestore(&vcmlock, flags);
	return 0;
fail_eagain:
	spin_unlock_irqrestore(&vcmlock, flags);
	return -EAGAIN;
fail:
	spin_unlock_irqrestore(&vcmlock, flags);
	return -EINVAL;
}


enum memtarget_t vcm_get_memtype_of_res(struct res *res)
{
	return VCM_INVALID;
}

static int vcm_free_max_munch_cont(struct phys_chunk *head)
{
	struct phys_chunk *chunk, *tmp;

	if (!head)
		return -EINVAL;

	list_for_each_entry_safe(chunk, tmp, &head->allocated,
				 allocated) {
		list_del_init(&chunk->allocated);
	}

	return 0;
}

static int vcm_alloc_max_munch_cont(size_t start_addr, size_t len,
				    struct phys_chunk *head)
{
	/* this function should always succeed, since it
	   parallels a VCM */

	int i, j;

	if (!head) {
		vcm_err("head is NULL in continuous map.\n");
		goto fail;
	}

	if (start_addr < (size_t) bootmem_cont) {
		vcm_err("phys start addr (%p) < base (%p)\n",
			(void *) start_addr, (void *) bootmem_cont);
		goto fail;
	}

	if ((start_addr + len) >= ((size_t) bootmem_cont + cont_sz)) {
		vcm_err("requested region (%p + %zu) >"
			" available region (%p + %i)",
			(void *) start_addr, len,
			(void *) bootmem_cont, cont_sz);
		goto fail;
	}

	i = (start_addr - (size_t) bootmem_cont)/SZ_4K;

	for (j = 0; j < ARRAY_SIZE(smmu_map_sizes); ++j) {
		while (len/smmu_map_sizes[j]) {
			if (!list_empty(&cont_phys_chunk[i].allocated)) {
				vcm_err("chunk %i (addr %p) already mapped\n",
					i, (void *) ((size_t) bootmem_cont +
						     (i*SZ_4K)));
				goto fail_free;
			}
			list_add_tail(&cont_phys_chunk[i].allocated,
				      &head->allocated);
			cont_phys_chunk[i].size = smmu_map_sizes[j];

			len -= smmu_map_sizes[j];
			i += smmu_map_sizes[j]/SZ_4K;
		}
	}

	if (len % SZ_4K) {
		if (!list_empty(&cont_phys_chunk[i].allocated)) {
			vcm_err("chunk %i (addr %p) already mapped\n",
				i, (void *) ((size_t) bootmem_cont +
					     (i*SZ_4K)));
			goto fail_free;
		}
		len -= SZ_4K;
		list_add_tail(&cont_phys_chunk[i].allocated,
			      &head->allocated);

		i++;
	}

	return i;

fail_free:
	{
		struct phys_chunk *chunk, *tmp;
		/* just remove from the list; if we're double-allocating
		   we don't want to stamp on the other guy */
		list_for_each_entry_safe(chunk, tmp, &head->allocated,
					 allocated) {
			list_del(&chunk->allocated);
		}
	}
fail:
	return 0;
}
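
/*
 * Example of the max-munch split above (illustrative): a request for
 * SZ_1M + SZ_64K + SZ_4K claims one 1M chunk (advancing i by 256 4K
 * slots), then one 64K chunk (16 slots), then one 4K chunk, and
 * returns the index of the next free slot.
 */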

struct physmem *vcm_phys_alloc(enum memtype_t memtype, size_t len, u32 attr)
{
	unsigned long flags;
	int ret;
	struct physmem *physmem = NULL;
	int blocks_allocated;

	spin_lock_irqsave(&vcmlock, flags);

	physmem = kzalloc(sizeof(*physmem), GFP_KERNEL);
	if (!physmem) {
		vcm_err("kzalloc(%zu, GFP_KERNEL) ret 0\n", sizeof(*physmem));
		goto fail;
	}

	physmem->memtype = memtype;
	physmem->len = len;
	physmem->attr = attr;

	INIT_LIST_HEAD(&physmem->alloc_head.allocated);

	if (attr & VCM_PHYS_CONT) {
		if (!cont_vcm_id) {
			vcm_err("cont_vcm_id is NULL\n");
			goto fail2;
		}

		physmem->is_cont = 1;

		/* TODO: get attributes */
		physmem->res = __vcm_reserve(cont_vcm_id, len, 0);
		if (!physmem->res) {
			vcm_err("contiguous space allocation failed\n");
			goto fail2;
		}

		/* if we're here we know we have memory, create
		   the shadow physmem links */
		blocks_allocated =
			vcm_alloc_max_munch_cont(
				physmem->res->dev_addr,
				len,
				&physmem->alloc_head);

		if (blocks_allocated == 0) {
			vcm_err("shadow physmem allocation failed\n");
			goto fail3;
		}
	} else {
		blocks_allocated = vcm_alloc_max_munch(len, memtype,
						       &physmem->alloc_head);
		if (blocks_allocated == 0) {
			vcm_err("physical allocation failed:"
				" vcm_alloc_max_munch(%zu, %p) ret 0\n",
				len, &physmem->alloc_head);
			goto fail2;
		}
	}

	spin_unlock_irqrestore(&vcmlock, flags);
	return physmem;

fail3:
	ret = __vcm_unreserve(physmem->res);
	if (ret != 0) {
		vcm_err("vcm_unreserve(%p) ret %i during cleanup\n",
			(void *) physmem->res, ret);
		spin_unlock_irqrestore(&vcmlock, flags);
		return NULL;
	}
fail2:
	kfree(physmem);
fail:
	spin_unlock_irqrestore(&vcmlock, flags);
	return NULL;
}


int vcm_phys_free(struct physmem *physmem)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vcmlock, flags);

	if (!physmem) {
		vcm_err("physmem is NULL\n");
		goto fail;
	}

	if (physmem->is_cont) {
		if (!physmem->res) {
			vcm_err("contiguous reservation is NULL\n");
			goto fail;
		}

		ret = vcm_free_max_munch_cont(&physmem->alloc_head);
		if (ret != 0) {
			vcm_err("failed to free physical blocks:"
				" vcm_free_max_munch_cont(%p) ret %i\n",
				(void *) &physmem->alloc_head, ret);
			goto fail;
		}

		ret = __vcm_unreserve(physmem->res);
		if (ret != 0) {
			vcm_err("failed to free virtual blocks:"
				" vcm_unreserve(%p) ret %i\n",
				(void *) physmem->res, ret);
			goto fail;
		}

	} else {

		ret = vcm_alloc_free_blocks(physmem->memtype,
					    &physmem->alloc_head);
		if (ret != 0) {
			vcm_err("failed to free physical blocks:"
				" vcm_alloc_free_blocks(%p) ret %i\n",
				(void *) &physmem->alloc_head, ret);
			goto fail;
		}
	}

	memset(physmem, 0, sizeof(*physmem));

	kfree(physmem);

	spin_unlock_irqrestore(&vcmlock, flags);
	return 0;

fail:
	spin_unlock_irqrestore(&vcmlock, flags);
	return -EINVAL;
}


struct avcm *vcm_assoc(struct vcm *vcm, struct device *dev, u32 attr)
{
	unsigned long flags;
	struct avcm *avcm = NULL;

	spin_lock_irqsave(&vcmlock, flags);

	if (!vcm) {
		vcm_err("vcm is NULL\n");
		goto fail;
	}

	if (!dev) {
		vcm_err("dev is NULL\n");
		goto fail;
	}

	if (vcm->type == VCM_EXT_KERNEL && !list_empty(&vcm->assoc_head)) {
		vcm_err("only one device may be associated with a"
			" VCM_EXT_KERNEL\n");
		goto fail;
	}

	avcm = kzalloc(sizeof(*avcm), GFP_KERNEL);
	if (!avcm) {
		vcm_err("kzalloc(%zu, GFP_KERNEL) ret NULL\n", sizeof(*avcm));
		goto fail;
	}

	avcm->dev = dev;

	avcm->vcm = vcm;
	avcm->attr = attr;
	avcm->is_active = 0;

	INIT_LIST_HEAD(&avcm->assoc_elm);
	list_add(&avcm->assoc_elm, &vcm->assoc_head);

	spin_unlock_irqrestore(&vcmlock, flags);
	return avcm;

fail:
	spin_unlock_irqrestore(&vcmlock, flags);
	return NULL;
}


int vcm_deassoc(struct avcm *avcm)
{
	unsigned long flags;

	spin_lock_irqsave(&vcmlock, flags);

	if (!avcm) {
		vcm_err("avcm is NULL\n");
		goto fail;
	}

	if (list_empty(&avcm->assoc_elm)) {
		vcm_err("nothing to deassociate\n");
		goto fail;
	}

	if (avcm->is_active) {
		vcm_err("association still activated\n");
		goto fail_busy;
	}

	list_del(&avcm->assoc_elm);

	memset(avcm, 0, sizeof(*avcm));

	kfree(avcm);
	spin_unlock_irqrestore(&vcmlock, flags);
	return 0;
fail_busy:
	spin_unlock_irqrestore(&vcmlock, flags);
	return -EBUSY;
fail:
	spin_unlock_irqrestore(&vcmlock, flags);
	return -EINVAL;
}


int vcm_set_assoc_attr(struct avcm *avcm, u32 attr)
{
	return 0;
}


u32 vcm_get_assoc_attr(struct avcm *avcm)
{
	return 0;
}


int vcm_activate(struct avcm *avcm)
{
	unsigned long flags;
	struct vcm *vcm;

	spin_lock_irqsave(&vcmlock, flags);

	if (!avcm) {
		vcm_err("avcm is NULL\n");
		goto fail;
	}

	vcm = avcm->vcm;
	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	if (!avcm->dev) {
		vcm_err("cannot activate without a device\n");
		goto fail_nodev;
	}

	if (avcm->is_active) {
		vcm_err("double activate\n");
		goto fail_busy;
	}

	if (vcm->type == VCM_DEVICE) {
#ifdef CONFIG_SMMU
		int ret;
		ret = iommu_attach_device(vcm->domain, avcm->dev);
		if (ret != 0) {
			dev_err(avcm->dev, "failed to attach to domain\n");
			goto fail_dev;
		}
#else
		vcm_err("No SMMU support - cannot activate/deactivate\n");
		goto fail_nodev;
#endif
	}

	avcm->is_active = 1;
	spin_unlock_irqrestore(&vcmlock, flags);
	return 0;

#ifdef CONFIG_SMMU
fail_dev:
	spin_unlock_irqrestore(&vcmlock, flags);
	return -ENODEV;
#endif
fail_busy:
	spin_unlock_irqrestore(&vcmlock, flags);
	return -EBUSY;
fail_nodev:
	spin_unlock_irqrestore(&vcmlock, flags);
	return -ENODEV;
fail:
	spin_unlock_irqrestore(&vcmlock, flags);
	return -EINVAL;
}


int vcm_deactivate(struct avcm *avcm)
{
	unsigned long flags;
	struct vcm *vcm;

	spin_lock_irqsave(&vcmlock, flags);

	if (!avcm)
		goto fail;

	vcm = avcm->vcm;
	if (!vcm) {
		vcm_err("NULL vcm\n");
		goto fail;
	}

	if (!avcm->dev) {
		vcm_err("cannot deactivate without a device\n");
		goto fail;
	}

	if (!avcm->is_active) {
		vcm_err("double deactivate\n");
		goto fail_nobusy;
	}

	if (vcm->type == VCM_DEVICE) {
#ifdef CONFIG_SMMU
		/* TODO: pmem check */
		iommu_detach_device(vcm->domain, avcm->dev);
#else
		vcm_err("No SMMU support - cannot activate/deactivate\n");
		goto fail;
#endif
	}

	avcm->is_active = 0;
	spin_unlock_irqrestore(&vcmlock, flags);
	return 0;
fail_nobusy:
	spin_unlock_irqrestore(&vcmlock, flags);
	return -ENOENT;
fail:
	spin_unlock_irqrestore(&vcmlock, flags);
	return -EINVAL;
}

struct bound *vcm_create_bound(struct vcm *vcm, size_t len)
{
	return NULL;
}


int vcm_free_bound(struct bound *bound)
{
	return -EINVAL;
}


struct res *vcm_reserve_from_bound(struct bound *bound, size_t len,
				   u32 attr)
{
	return NULL;
}


size_t vcm_get_bound_start_addr(struct bound *bound)
{
	return 0;
}


size_t vcm_get_bound_len(struct bound *bound)
{
	return 0;
}


struct physmem *vcm_map_phys_addr(phys_addr_t phys, size_t len)
{
	return NULL;
}


size_t vcm_get_next_phys_addr(struct physmem *physmem, phys_addr_t phys,
			      size_t *len)
{
	return 0;
}


struct res *vcm_get_res(unsigned long dev_addr, struct vcm *vcm)
{
	return NULL;
}


size_t vcm_translate(struct device *src_dev, struct vcm *src_vcm,
		     struct vcm *dst_vcm)
{
	return 0;
}


size_t vcm_get_phys_num_res(phys_addr_t phys)
{
	return 0;
}


struct res *vcm_get_next_phys_res(phys_addr_t phys, struct res *res,
				  size_t *len)
{
	return NULL;
}


phys_addr_t vcm_get_pgtbl_pa(struct vcm *vcm)
{
	return 0;
}


/* No lock needed, smmu_translate has its own lock */
phys_addr_t vcm_dev_addr_to_phys_addr(struct vcm *vcm, unsigned long dev_addr)
{
	if (!vcm)
		return -EINVAL;
#ifdef CONFIG_SMMU
	return iommu_iova_to_phys(vcm->domain, dev_addr);
#else
	vcm_err("No support for SMMU - manual translation not supported\n");
	return -ENODEV;
#endif
}


/* No lock needed, bootmem_cont never changes after init */
phys_addr_t vcm_get_cont_memtype_pa(enum memtype_t memtype)
{
	if (memtype != VCM_MEMTYPE_0) {
		vcm_err("memtype != VCM_MEMTYPE_0\n");
		goto fail;
	}

	if (!bootmem_cont) {
		vcm_err("bootmem_cont 0\n");
		goto fail;
	}

	return (size_t) bootmem_cont;
fail:
	return 0;
}


/* No lock needed, constant */
size_t vcm_get_cont_memtype_len(enum memtype_t memtype)
{
	if (memtype != VCM_MEMTYPE_0) {
		vcm_err("memtype != VCM_MEMTYPE_0\n");
		return 0;
	}

	return cont_sz;
}

int vcm_hook(struct device *dev, vcm_handler handler, void *data)
{
#ifdef CONFIG_SMMU
	vcm_err("No interrupts in IOMMU API\n");
	return -ENODEV;
#else
	vcm_err("No support for SMMU - interrupts not supported\n");
	return -ENODEV;
#endif
}


size_t vcm_hw_ver(size_t dev)
{
	return 0;
}


static int vcm_cont_phys_chunk_init(void)
{
	int i;
	size_t cont_pa;

	if (!cont_phys_chunk) {
		vcm_err("cont_phys_chunk 0\n");
		goto fail;
	}

	if (!bootmem_cont) {
		vcm_err("bootmem_cont 0\n");
		goto fail;
	}

	cont_pa = (size_t) bootmem_cont;

	for (i = 0; i < cont_sz/PAGE_SIZE; ++i) {
		cont_phys_chunk[i].pa = cont_pa;
		cont_pa += PAGE_SIZE;
		cont_phys_chunk[i].size = SZ_4K;
		/* Not part of an allocator-managed pool */
		cont_phys_chunk[i].pool_idx = -1;
		INIT_LIST_HEAD(&cont_phys_chunk[i].allocated);
	}

	return 0;

fail:
	return -EINVAL;
}

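/*
 * Boot-time bring-up, as a sketch (platform code supplies the region
 * and memtype tables; the names and the 8M size here are illustrative):
 *
 *	ret = vcm_sys_init(regions, ARRAY_SIZE(regions), mt_map, n_mt,
 *			   (void *) cont_base_pa, SZ_8M);
 *
 * This seeds the allocator, creates the special contiguous VCM that
 * vcm_phys_alloc(..., VCM_PHYS_CONT) draws from, and builds one
 * phys_chunk per 4K page of the contiguous region.
 */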
int vcm_sys_init(struct physmem_region *mem, int n_regions,
		 struct vcm_memtype_map *mt_map, int n_mt,
		 void *cont_pa, unsigned int cont_len)
{
	int ret;
	printk(KERN_INFO "VCM Initialization\n");
	bootmem_cont = cont_pa;
	cont_sz = cont_len;

	if (!bootmem_cont) {
		vcm_err("bootmem_cont is 0\n");
		ret = -EINVAL;
		goto fail;
	}

	ret = vcm_setup_tex_classes();
	if (ret != 0) {
		printk(KERN_INFO "Could not determine TEX attribute mapping\n");
		ret = -EINVAL;
		goto fail;
	}


	ret = vcm_alloc_init(mem, n_regions, mt_map, n_mt);

	if (ret != 0) {
		vcm_err("vcm_alloc_init() ret %i\n", ret);
		ret = -EINVAL;
		goto fail;
	}

	cont_phys_chunk = kzalloc(sizeof(*cont_phys_chunk)*(cont_sz/PAGE_SIZE),
				  GFP_KERNEL);
	if (!cont_phys_chunk) {
		vcm_err("kzalloc(%zu, GFP_KERNEL) ret 0\n",
			sizeof(*cont_phys_chunk)*(cont_sz/PAGE_SIZE));
		goto fail_free;
	}

	/* the address and size will hit our special case unless we
	   pass an override */
	cont_vcm_id = vcm_create_flagged(0, (size_t)bootmem_cont, cont_sz);
	if (!cont_vcm_id) {
		vcm_err("vcm_create_flagged(0, %p, %i) ret 0\n",
			bootmem_cont, cont_sz);
		ret = -EINVAL;
		goto fail_free2;
	}

	ret = vcm_cont_phys_chunk_init();
	if (ret != 0) {
		vcm_err("vcm_cont_phys_chunk_init() ret %i\n", ret);
		goto fail_free3;
	}

	printk(KERN_INFO "VCM Initialization OK\n");
	return 0;

fail_free3:
	ret = __vcm_free(cont_vcm_id);
	if (ret != 0) {
		vcm_err("vcm_free(%p) ret %i during failure path\n",
			(void *) cont_vcm_id, ret);
		return ret;
	}

fail_free2:
	kfree(cont_phys_chunk);
	cont_phys_chunk = NULL;

fail_free:
	ret = vcm_alloc_destroy();
	if (ret != 0)
		vcm_err("vcm_alloc_destroy() ret %i during failure path\n",
			ret);

	ret = -EINVAL;
fail:
	return ret;
}


int vcm_sys_destroy(void)
{
	int ret = 0;

	if (!cont_phys_chunk) {
		vcm_err("cont_phys_chunk is 0\n");
		return -ENODEV;
	}

	if (!cont_vcm_id) {
		vcm_err("cont_vcm_id is 0\n");
		return -ENODEV;
	}

	ret = __vcm_free(cont_vcm_id);
	if (ret != 0) {
		vcm_err("vcm_free(%p) ret %i\n", (void *) cont_vcm_id, ret);
		return -ENODEV;
	}

	cont_vcm_id = NULL;

	kfree(cont_phys_chunk);
	cont_phys_chunk = NULL;

	ret = vcm_alloc_destroy();
	if (ret != 0) {
		vcm_err("vcm_alloc_destroy() ret %i\n", ret);
		return ret;
	}

	return ret;
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zach Pfeffer <zpfeffer@codeaurora.org>");