blob: 234841de7860e05b41e5432aec42c997be84f9ba [file] [log] [blame]
Jordan Crouse00714012012-03-16 14:53:40 -06001/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/types.h>
14#include <linux/device.h>
15#include <linux/spinlock.h>
16#include <linux/genalloc.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060019#include <linux/iommu.h>
Jordan Crouse817e0b92012-02-04 10:23:53 -070020#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021
22#include "kgsl.h"
23#include "kgsl_mmu.h"
24#include "kgsl_device.h"
25#include "kgsl_sharedmem.h"
Jeremy Gebbena3d07a42011-10-17 12:08:16 -060026#include "adreno_postmortem.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027
28#define KGSL_MMU_ALIGN_SHIFT 13
29#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
30
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060031static enum kgsl_mmutype kgsl_mmu_type;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070032
33static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
34
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
36{
37 int i;
Shubhraprakash Das84fdb112012-04-04 12:49:31 -060038 /* For IOMMU only unmap the global structures to global pt */
39 if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
40 (KGSL_MMU_GLOBAL_PT != pt->name))
41 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070042 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
43 struct kgsl_device *device = kgsl_driver.devp[i];
44 if (device)
45 device->ftbl->cleanup_pt(device, pt);
46 }
47 return 0;
48}
49
/*
 * kref release callback for a pagetable.  Unlinks it from the global
 * pagetable list, removes its sysfs nodes, undoes the per-device
 * setup, destroys both VA pools and the backend pagetable, and frees
 * the structure.  The order deliberately reverses
 * kgsl_mmu_createpagetableobject().
 */
static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	/* Unlink under the driver-wide pagetable list lock */
	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	/* kgsl_pool only exists for the IOMMU global pagetable */
	if (pagetable->kgsl_pool)
		gen_pool_destroy(pagetable->kgsl_pool);
	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	/* Release the hardware/backend pagetable itself */
	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);

	kfree(pagetable);
}
73
74static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
75{
76 if (pagetable)
77 kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
78}
79
80static struct kgsl_pagetable *
81kgsl_get_pagetable(unsigned long name)
82{
83 struct kgsl_pagetable *pt, *ret = NULL;
84 unsigned long flags;
85
86 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
87 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
88 if (pt->name == name) {
89 ret = pt;
90 kref_get(&ret->refcount);
91 break;
92 }
93 }
94
95 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
96 return ret;
97}
98
99static struct kgsl_pagetable *
100_get_pt_from_kobj(struct kobject *kobj)
101{
102 unsigned long ptname;
103
104 if (!kobj)
105 return NULL;
106
107 if (sscanf(kobj->name, "%ld", &ptname) != 1)
108 return NULL;
109
110 return kgsl_get_pagetable(ptname);
111}
112
113static ssize_t
114sysfs_show_entries(struct kobject *kobj,
115 struct kobj_attribute *attr,
116 char *buf)
117{
118 struct kgsl_pagetable *pt;
119 int ret = 0;
120
121 pt = _get_pt_from_kobj(kobj);
122
123 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600124 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700125
126 kgsl_put_pagetable(pt);
127 return ret;
128}
129
130static ssize_t
131sysfs_show_mapped(struct kobject *kobj,
132 struct kobj_attribute *attr,
133 char *buf)
134{
135 struct kgsl_pagetable *pt;
136 int ret = 0;
137
138 pt = _get_pt_from_kobj(kobj);
139
140 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600141 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700142
143 kgsl_put_pagetable(pt);
144 return ret;
145}
146
147static ssize_t
148sysfs_show_va_range(struct kobject *kobj,
149 struct kobj_attribute *attr,
150 char *buf)
151{
152 struct kgsl_pagetable *pt;
153 int ret = 0;
154
155 pt = _get_pt_from_kobj(kobj);
156
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600157 if (pt) {
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600158 ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600159 kgsl_mmu_get_ptsize());
160 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700161
162 kgsl_put_pagetable(pt);
163 return ret;
164}
165
166static ssize_t
167sysfs_show_max_mapped(struct kobject *kobj,
168 struct kobj_attribute *attr,
169 char *buf)
170{
171 struct kgsl_pagetable *pt;
172 int ret = 0;
173
174 pt = _get_pt_from_kobj(kobj);
175
176 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600177 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178
179 kgsl_put_pagetable(pt);
180 return ret;
181}
182
183static ssize_t
184sysfs_show_max_entries(struct kobject *kobj,
185 struct kobj_attribute *attr,
186 char *buf)
187{
188 struct kgsl_pagetable *pt;
189 int ret = 0;
190
191 pt = _get_pt_from_kobj(kobj);
192
193 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600194 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700195
196 kgsl_put_pagetable(pt);
197 return ret;
198}
199
/*
 * Per-pagetable sysfs attributes.  All are world-readable (0444) and
 * read-only (.store == NULL); each is backed by the matching
 * sysfs_show_* handler above.
 */
static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

/* NULL-terminated list consumed by sysfs_create_group() */
static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};
242
243static void
244pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
245{
246 if (pagetable->kobj)
247 sysfs_remove_group(pagetable->kobj,
248 &pagetable_attr_group);
249
250 kobject_put(pagetable->kobj);
251}
252
253static int
254pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
255{
256 char ptname[16];
257 int ret = -ENOMEM;
258
259 snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
260 pagetable->kobj = kobject_create_and_add(ptname,
261 kgsl_driver.ptkobj);
262 if (pagetable->kobj == NULL)
263 goto err;
264
265 ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);
266
267err:
268 if (ret) {
269 if (pagetable->kobj)
270 kobject_put(pagetable->kobj);
271
272 pagetable->kobj = NULL;
273 }
274
275 return ret;
276}
277
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600278unsigned int kgsl_mmu_get_ptsize(void)
279{
280 /*
281 * For IOMMU, we could do up to 4G virtual range if we wanted to, but
282 * it makes more sense to return a smaller range and leave the rest of
283 * the virtual range for future improvements
284 */
285
286 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
287 return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
288 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
289 return SZ_2G;
290 else
291 return 0;
292}
293
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600294unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700295{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600296 struct kgsl_mmu *mmu = &device->mmu;
297 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
298 return 0;
299 else
300 return mmu->mmu_ops->mmu_get_current_ptbase(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700301}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600302EXPORT_SYMBOL(kgsl_mmu_get_current_ptbase);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700303
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600304int
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600305kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base)
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600306{
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600307 struct kgsl_pagetable *pt;
308 int ptid = -1;
309
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600310 spin_lock(&kgsl_driver.ptlock);
311 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600312 if (pt->pt_ops->mmu_pt_equal(pt, pt_base)) {
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600313 ptid = (int) pt->name;
314 break;
315 }
316 }
317 spin_unlock(&kgsl_driver.ptlock);
318
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600319 return ptid;
320}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600321EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600322
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600323void kgsl_mmu_setstate(struct kgsl_device *device,
324 struct kgsl_pagetable *pagetable)
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600325{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600326 struct kgsl_mmu *mmu = &device->mmu;
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600327
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600328 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
329 return;
330 else
331 mmu->mmu_ops->mmu_setstate(device,
332 pagetable);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600333}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600334EXPORT_SYMBOL(kgsl_mmu_setstate);
335
336int kgsl_mmu_init(struct kgsl_device *device)
337{
338 struct kgsl_mmu *mmu = &device->mmu;
339
340 mmu->device = device;
341
342 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
Shubhraprakash Dasf5526a12012-04-20 00:48:33 -0600343 int status = 0;
344 status = kgsl_allocate_contiguous(&mmu->setstate_memory, 64);
345 if (!status) {
346 kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
347 mmu->setstate_memory.size);
348 dev_info(device->dev, "|%s| MMU type set for device is "
349 "NOMMU\n", __func__);
350 }
351 return status;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600352 } else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
353 mmu->mmu_ops = &gpummu_ops;
354 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
355 mmu->mmu_ops = &iommu_ops;
356
357 return mmu->mmu_ops->mmu_init(device);
358}
359EXPORT_SYMBOL(kgsl_mmu_init);
360
361int kgsl_mmu_start(struct kgsl_device *device)
362{
363 struct kgsl_mmu *mmu = &device->mmu;
364
365 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
366 kgsl_regwrite(device, MH_MMU_CONFIG, 0);
367 return 0;
368 } else {
369 return mmu->mmu_ops->mmu_start(device);
370 }
371}
372EXPORT_SYMBOL(kgsl_mmu_start);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600373
/*
 * Memory-hub interrupt handler: log AXI read/write errors, forward
 * page faults to the MMU backend, then acknowledge the handled bits.
 */
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int reg;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
	/* The AXI error address/info register is shared by both cases */
	kgsl_regread(device, MH_AXI_ERROR, &reg);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		device->mmu.mmu_ops->mmu_pagefault(device);

	/* Only acknowledge the interrupt bits this driver owns */
	status &= KGSL_MMU_INT_MASK;
	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);
393
/*
 * Invoke each attached device's setup_pt hook so @pt receives the
 * per-device global mappings.  For IOMMU only the global pagetable
 * gets these mappings; others return immediately.  On failure the
 * devices visited so far are cleaned up again.  Returns 0 or the
 * first setup_pt error.
 */
static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;

	/* For IOMMU only map the global structures to global pt */
	if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
		(KGSL_MMU_GLOBAL_PT != pt->name))
		return 0;
	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	return status;
error_pt:
	/*
	 * NOTE(review): the unwind starts at the device whose setup_pt
	 * just failed, so cleanup_pt is presumably safe on a partially
	 * set up device -- confirm against the ftbl backends.
	 */
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}
421
422static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
423 unsigned int name)
424{
425 int status = 0;
426 struct kgsl_pagetable *pagetable = NULL;
427 unsigned long flags;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600428 unsigned int ptsize;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700429
430 pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
431 if (pagetable == NULL) {
432 KGSL_CORE_ERR("kzalloc(%d) failed\n",
433 sizeof(struct kgsl_pagetable));
434 return NULL;
435 }
436
437 kref_init(&pagetable->refcount);
438
439 spin_lock_init(&pagetable->lock);
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600440
441 ptsize = kgsl_mmu_get_ptsize();
442
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700443 pagetable->name = name;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600444 pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700445
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600446 /*
447 * create a separate kgsl pool for IOMMU, global mappings can be mapped
448 * just once from this pool of the defaultpagetable
449 */
450 if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
451 (KGSL_MMU_GLOBAL_PT == name)) {
452 pagetable->kgsl_pool = gen_pool_create(PAGE_SHIFT, -1);
453 if (pagetable->kgsl_pool == NULL) {
454 KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
455 PAGE_SHIFT);
456 goto err_alloc;
457 }
458 if (gen_pool_add(pagetable->kgsl_pool,
459 KGSL_IOMMU_GLOBAL_MEM_BASE,
460 KGSL_IOMMU_GLOBAL_MEM_SIZE, -1)) {
461 KGSL_CORE_ERR("gen_pool_add failed\n");
462 goto err_kgsl_pool;
463 }
464 }
465
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700466 pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
467 if (pagetable->pool == NULL) {
468 KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600469 goto err_kgsl_pool;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700470 }
471
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600472 if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600473 ptsize, -1)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700474 KGSL_CORE_ERR("gen_pool_add failed\n");
475 goto err_pool;
476 }
477
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600478 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
479 pagetable->pt_ops = &gpummu_pt_ops;
480 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
481 pagetable->pt_ops = &iommu_pt_ops;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700482
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600483 pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
484 if (!pagetable->priv)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700485 goto err_pool;
486
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700487 status = kgsl_setup_pt(pagetable);
488 if (status)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600489 goto err_mmu_create;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700490
491 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
492 list_add(&pagetable->list, &kgsl_driver.pagetable_list);
493 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
494
495 /* Create the sysfs entries */
496 pagetable_add_sysfs_objects(pagetable);
497
498 return pagetable;
499
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600500err_mmu_create:
501 pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700502err_pool:
503 gen_pool_destroy(pagetable->pool);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600504err_kgsl_pool:
505 if (pagetable->kgsl_pool)
506 gen_pool_destroy(pagetable->kgsl_pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700507err_alloc:
508 kfree(pagetable);
509
510 return NULL;
511}
512
/*
 * Get (or lazily create) the pagetable named @name.  With no MMU a
 * non-NULL sentinel (-1) is returned so callers treat it as valid.
 * Unless per-process pagetables are enabled (GPUMMU only), every
 * request collapses onto the single global pagetable.
 */
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return (void *)(-1);

#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
	/* IOMMU does not support per-process pagetables here */
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		name = KGSL_MMU_GLOBAL_PT;
#else
	name = KGSL_MMU_GLOBAL_PT;
#endif
	pt = kgsl_get_pagetable(name);

	/* First user of this name creates the pagetable */
	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}
533
/* Public wrapper: release a reference obtained via kgsl_mmu_getpagetable(). */
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700539
540void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
541{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600542 struct kgsl_mmu *mmu = &device->mmu;
543 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
544 return;
545 else if (device->ftbl->setstate)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700546 device->ftbl->setstate(device, flags);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600547 else if (mmu->mmu_ops->mmu_device_setstate)
548 mmu->mmu_ops->mmu_device_setstate(device, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700549}
550EXPORT_SYMBOL(kgsl_setstate);
551
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600552void kgsl_mmu_device_setstate(struct kgsl_device *device, uint32_t flags)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700553{
554 struct kgsl_mmu *mmu = &device->mmu;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600555 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
556 return;
557 else if (mmu->mmu_ops->mmu_device_setstate)
558 mmu->mmu_ops->mmu_device_setstate(device, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700559}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600560EXPORT_SYMBOL(kgsl_mmu_device_setstate);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700561
/*
 * Program the memory-hub registers common to all MMU types: disable
 * translation, fence the accessible physical window (MPU), and apply
 * the board-specific arbiter/interface configuration.  The register
 * write order follows the hardware bring-up sequence; do not reorder.
 */
void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force mmu off to for now*/
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	/* wait for the core to go idle before reprogramming */
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
			mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	/* optional board-specific interface tuning (0 means "leave default") */
	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
				mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
				mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}
588
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700589int
590kgsl_mmu_map(struct kgsl_pagetable *pagetable,
591 struct kgsl_memdesc *memdesc,
592 unsigned int protflags)
593{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600594 int ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700595
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600596 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
Jordan Crouse40861a42012-02-06 10:18:23 -0700597 if (memdesc->sglen == 1) {
Shubhraprakash Das4d6af2b2012-04-20 00:35:03 -0600598 memdesc->gpuaddr = sg_dma_address(memdesc->sg);
599 if (!memdesc->gpuaddr)
600 memdesc->gpuaddr = sg_phys(memdesc->sg);
601 if (!memdesc->gpuaddr) {
602 KGSL_CORE_ERR("Unable to get a valid physical "
603 "address for memdesc\n");
604 return -EINVAL;
605 }
Jordan Crouse40861a42012-02-06 10:18:23 -0700606 return 0;
607 } else {
608 KGSL_CORE_ERR("Memory is not contigious "
609 "(sglen = %d)\n", memdesc->sglen);
610 return -EINVAL;
611 }
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600612 }
Jordan Crouse40861a42012-02-06 10:18:23 -0700613
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600614 /* Allocate from kgsl pool if it exists for global mappings */
615 if (pagetable->kgsl_pool &&
616 (KGSL_MEMFLAGS_GLOBAL & memdesc->priv))
617 memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->kgsl_pool,
618 memdesc->size, KGSL_MMU_ALIGN_SHIFT);
619 else
620 memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
621 memdesc->size, KGSL_MMU_ALIGN_SHIFT);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700622
623 if (memdesc->gpuaddr == 0) {
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600624 KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
625 memdesc->size,
626 ((pagetable->kgsl_pool &&
627 (KGSL_MEMFLAGS_GLOBAL & memdesc->priv)) ?
628 "kgsl_pool" : "general_pool"));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700629 KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
630 pagetable->name, pagetable->stats.mapped,
631 pagetable->stats.entries);
632 return -ENOMEM;
633 }
634
Shubhraprakash Dasbadaeda2012-03-21 00:31:39 -0600635 if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
636 spin_lock(&pagetable->lock);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600637 ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags);
Shubhraprakash Dasbadaeda2012-03-21 00:31:39 -0600638 if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
639 spin_lock(&pagetable->lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700640
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600641 if (ret)
642 goto err_free_gpuaddr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700643
644 /* Keep track of the statistics for the sysfs files */
645
646 KGSL_STATS_ADD(1, pagetable->stats.entries,
647 pagetable->stats.max_entries);
648
649 KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
650 pagetable->stats.max_mapped);
651
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700652 spin_unlock(&pagetable->lock);
653
654 return 0;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600655
656err_free_gpuaddr:
657 spin_unlock(&pagetable->lock);
658 gen_pool_free(pagetable->pool, memdesc->gpuaddr, memdesc->size);
659 memdesc->gpuaddr = 0;
660 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700661}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600662EXPORT_SYMBOL(kgsl_mmu_map);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700663
/*
 * kgsl_mmu_unmap - tear down the mapping described by @memdesc
 * @pagetable: pagetable it was mapped into
 * @memdesc: descriptor whose gpuaddr/size identify the mapping
 *
 * Unmaps the pages, updates the statistics and returns the VA range
 * to the pool it came from.  The gpuaddr is preserved for global
 * mappings because other pagetables may still reference it.
 * Always returns 0.
 */
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	/* Nothing was ever mapped */
	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
		return 0;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		memdesc->gpuaddr = 0;
		return 0;
	}
	/*
	 * Same asymmetric locking as kgsl_mmu_map: for IOMMU the lock
	 * is taken only after mmu_unmap (which may sleep) returns.
	 */
	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= memdesc->size;

	spin_unlock(&pagetable->lock);

	/* Global mappings came from kgsl_pool; return the VA to its owner */
	if (pagetable->kgsl_pool &&
		(KGSL_MEMFLAGS_GLOBAL & memdesc->priv))
		gen_pool_free(pagetable->kgsl_pool,
				memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK,
				memdesc->size);
	else
		gen_pool_free(pagetable->pool,
				memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK,
				memdesc->size);

	/*
	 * Don't clear the gpuaddr on global mappings because they
	 * may be in use by other pagetables
	 */
	if (!(memdesc->priv & KGSL_MEMFLAGS_GLOBAL))
		memdesc->gpuaddr = 0;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
705
/*
 * kgsl_mmu_map_global - map @memdesc as a global (shared) mapping
 * @pagetable: pagetable to add the mapping to
 * @memdesc: descriptor to map; marked KGSL_MEMFLAGS_GLOBAL here
 * @protflags: protection flags passed through to kgsl_mmu_map()
 *
 * The first pagetable to map the descriptor establishes its GPU
 * address; subsequent calls must land on the same address, otherwise
 * the new mapping is undone and -EINVAL-style failure is returned.
 */
int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
			struct kgsl_memdesc *memdesc, unsigned int protflags)
{
	int result = -EINVAL;
	unsigned int gpuaddr = 0;

	if (memdesc == NULL) {
		KGSL_CORE_ERR("invalid memdesc\n");
		goto error;
	}
	/* Not all global mappings are needed for all MMU types */
	if (!memdesc->size)
		return 0;

	/* Remember any address assigned by an earlier pagetable */
	gpuaddr = memdesc->gpuaddr;
	memdesc->priv |= KGSL_MEMFLAGS_GLOBAL;

	result = kgsl_mmu_map(pagetable, memdesc, protflags);
	if (result)
		goto error;

	/*global mappings must have the same gpu address in all pagetables*/
	if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
		KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x"
			"gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr,
			gpuaddr, memdesc->gpuaddr);
		goto error_unmap;
	}
	return result;
error_unmap:
	kgsl_mmu_unmap(pagetable, memdesc);
error:
	return result;
}
EXPORT_SYMBOL(kgsl_mmu_map_global);
741
742int kgsl_mmu_stop(struct kgsl_device *device)
743{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700744 struct kgsl_mmu *mmu = &device->mmu;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600745
746 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
747 return 0;
748 else
749 return mmu->mmu_ops->mmu_stop(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700750}
751EXPORT_SYMBOL(kgsl_mmu_stop);
752
753int kgsl_mmu_close(struct kgsl_device *device)
754{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700755 struct kgsl_mmu *mmu = &device->mmu;
756
Shubhraprakash Dasf5526a12012-04-20 00:48:33 -0600757 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
758 kgsl_sharedmem_free(&mmu->setstate_memory);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600759 return 0;
Shubhraprakash Dasf5526a12012-04-20 00:48:33 -0600760 } else
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600761 return mmu->mmu_ops->mmu_close(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700762}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600763EXPORT_SYMBOL(kgsl_mmu_close);
764
765int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
766 enum kgsl_deviceid id)
767{
768 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
769 return pt->pt_ops->mmu_pt_get_flags(pt, id);
770 else
771 return 0;
772}
773EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
774
775void kgsl_mmu_ptpool_destroy(void *ptpool)
776{
777 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
778 kgsl_gpummu_ptpool_destroy(ptpool);
779 ptpool = 0;
780}
781EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
782
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600783void *kgsl_mmu_ptpool_init(int entries)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600784{
785 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600786 return kgsl_gpummu_ptpool_init(entries);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600787 else
788 return (void *)(-1);
789}
790EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
791
792int kgsl_mmu_enabled(void)
793{
794 if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
795 return 1;
796 else
797 return 0;
798}
799EXPORT_SYMBOL(kgsl_mmu_enabled);
800
801int kgsl_mmu_pt_equal(struct kgsl_pagetable *pt,
802 unsigned int pt_base)
803{
804 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
805 return true;
806 else
807 return pt->pt_ops->mmu_pt_equal(pt, pt_base);
808}
809EXPORT_SYMBOL(kgsl_mmu_pt_equal);
810
/* Report which MMU type was selected by kgsl_mmu_set_mmutype(). */
enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
{
	return kgsl_mmu_type;
}
EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
816
/*
 * Select the MMU type from (in increasing priority): the SoC default,
 * IOMMU hardware availability, and the optional "mmutype" module
 * parameter override ("gpummu", "iommu" or "nommu").  "iommu" is
 * honored only when IOMMU hardware is actually present.
 */
void kgsl_mmu_set_mmutype(char *mmutype)
{
	/* Set the default MMU - GPU on <=8960 and nothing on >= 8064 */
	kgsl_mmu_type =
		cpu_is_apq8064() ? KGSL_MMU_TYPE_NONE : KGSL_MMU_TYPE_GPU;

	/* Use the IOMMU if it is found */
	if (iommu_found())
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;

	/* Explicit overrides win over the autodetected defaults above */
	if (mmutype && !strncmp(mmutype, "gpummu", 6))
		kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
	if (iommu_found() && mmutype && !strncmp(mmutype, "iommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
	if (mmutype && !strncmp(mmutype, "nommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
}
EXPORT_SYMBOL(kgsl_mmu_set_mmutype);