blob: f11511fc2b8ce5529e72de75e22b2b87215a3a23 [file] [log] [blame]
Duy Truonge833aca2013-02-12 13:35:08 -08001/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
Steve Mucklef132c6c2012-06-06 18:30:57 -070013#include <linux/export.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070014#include <linux/types.h>
15#include <linux/device.h>
16#include <linux/spinlock.h>
17#include <linux/genalloc.h>
18#include <linux/slab.h>
19#include <linux/sched.h>
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060020#include <linux/iommu.h>
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -070021#include <mach/iommu.h>
Jordan Crouse817e0b92012-02-04 10:23:53 -070022#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070023
24#include "kgsl.h"
25#include "kgsl_mmu.h"
26#include "kgsl_device.h"
27#include "kgsl_sharedmem.h"
Shubhraprakash Das7a0c93c2012-11-20 15:15:08 -070028#include "adreno.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029
30#define KGSL_MMU_ALIGN_SHIFT 13
31#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
32
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060033static enum kgsl_mmutype kgsl_mmu_type;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070034
35static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
36
/*
 * kgsl_cleanup_pt - undo per-device setup on a pagetable
 * @pt: pagetable being torn down.  May be NULL on the no-MMU path
 *      (see kgsl_mmu_start(), which calls kgsl_setup_pt(NULL)).
 *
 * For IOMMU only the global pagetables carry the globally mapped
 * structures, so private per-process pagetables are skipped.
 *
 * NOTE(review): the KGSL_MMU_TYPE_NONE test looks redundant next to the
 * IOMMU test, but it must stay first — when the MMU type is NONE it
 * short-circuits the pt->name dereferences, which would crash on a NULL
 * pt.  Do not "simplify" this condition.
 *
 * Always returns 0.
 */
static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
        int i;
        /* For IOMMU only unmap the global structures to global pt */
        if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
                (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
                (KGSL_MMU_GLOBAL_PT != pt->name) &&
                (KGSL_MMU_PRIV_BANK_TABLE_NAME != pt->name))
                return 0;
        /* Give every registered device a chance to release its state */
        for (i = 0; i < KGSL_DEVICE_MAX; i++) {
                struct kgsl_device *device = kgsl_driver.devp[i];
                if (device)
                        device->ftbl->cleanup_pt(device, pt);
        }
        return 0;
}
53
Shubhraprakash Das6b30c9f2012-04-20 01:15:55 -060054
/*
 * kgsl_setup_pt - run per-device setup on a new pagetable
 * @pt: pagetable being created.  May be NULL on the no-MMU path
 *      (kgsl_mmu_start() calls kgsl_setup_pt(NULL)).
 *
 * Mirror of kgsl_cleanup_pt(): for IOMMU only the global pagetables get
 * the global structures mapped; private pagetables are skipped.  The
 * KGSL_MMU_TYPE_NONE test must stay first so a NULL @pt is never
 * dereferenced (see note in kgsl_cleanup_pt()).
 *
 * On failure of any device's setup_pt, previously set-up devices are
 * unwound in reverse (i counts back down through every slot already
 * visited, including the failing one — cleanup_pt is assumed safe to
 * call after a failed setup_pt).  Returns 0 or the failing status.
 */
static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
        int i = 0;
        int status = 0;

        /* For IOMMU only map the global structures to global pt */
        if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
                (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
                (KGSL_MMU_GLOBAL_PT != pt->name) &&
                (KGSL_MMU_PRIV_BANK_TABLE_NAME != pt->name))
                return 0;
        for (i = 0; i < KGSL_DEVICE_MAX; i++) {
                struct kgsl_device *device = kgsl_driver.devp[i];
                if (device) {
                        status = device->ftbl->setup_pt(device, pt);
                        if (status)
                                goto error_pt;
                }
        }
        return status;
error_pt:
        /* Unwind: clean up every device slot up to and including i */
        while (i >= 0) {
                struct kgsl_device *device = kgsl_driver.devp[i];
                if (device)
                        device->ftbl->cleanup_pt(device, pt);
                i--;
        }
        return status;
}
84
/*
 * kgsl_destroy_pagetable - kref release callback for a pagetable
 * @kref: embedded refcount of the dying struct kgsl_pagetable
 *
 * Runs when the last reference is dropped (via kgsl_put_pagetable()).
 * Teardown order: unlink from the global pagetable list (under ptlock,
 * so lookups can no longer find it), remove sysfs nodes, undo
 * per-device setup, destroy the VA pools, then the MMU-specific
 * pagetable, and finally free the container.
 */
static void kgsl_destroy_pagetable(struct kref *kref)
{
        struct kgsl_pagetable *pagetable = container_of(kref,
                struct kgsl_pagetable, refcount);
        unsigned long flags;

        spin_lock_irqsave(&kgsl_driver.ptlock, flags);
        list_del(&pagetable->list);
        spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

        pagetable_remove_sysfs_objects(pagetable);

        kgsl_cleanup_pt(pagetable);

        /* kgsl_pool exists only for IOMMU global pagetables */
        if (pagetable->kgsl_pool)
                gen_pool_destroy(pagetable->kgsl_pool);
        if (pagetable->pool)
                gen_pool_destroy(pagetable->pool);

        pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);

        kfree(pagetable);
}
108
109static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
110{
111 if (pagetable)
112 kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
113}
114
115static struct kgsl_pagetable *
116kgsl_get_pagetable(unsigned long name)
117{
118 struct kgsl_pagetable *pt, *ret = NULL;
119 unsigned long flags;
120
121 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
122 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
123 if (pt->name == name) {
124 ret = pt;
125 kref_get(&ret->refcount);
126 break;
127 }
128 }
129
130 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
131 return ret;
132}
133
134static struct kgsl_pagetable *
135_get_pt_from_kobj(struct kobject *kobj)
136{
137 unsigned long ptname;
138
139 if (!kobj)
140 return NULL;
141
142 if (sscanf(kobj->name, "%ld", &ptname) != 1)
143 return NULL;
144
145 return kgsl_get_pagetable(ptname);
146}
147
148static ssize_t
149sysfs_show_entries(struct kobject *kobj,
150 struct kobj_attribute *attr,
151 char *buf)
152{
153 struct kgsl_pagetable *pt;
154 int ret = 0;
155
156 pt = _get_pt_from_kobj(kobj);
157
158 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600159 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700160
161 kgsl_put_pagetable(pt);
162 return ret;
163}
164
165static ssize_t
166sysfs_show_mapped(struct kobject *kobj,
167 struct kobj_attribute *attr,
168 char *buf)
169{
170 struct kgsl_pagetable *pt;
171 int ret = 0;
172
173 pt = _get_pt_from_kobj(kobj);
174
175 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600176 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700177
178 kgsl_put_pagetable(pt);
179 return ret;
180}
181
182static ssize_t
183sysfs_show_va_range(struct kobject *kobj,
184 struct kobj_attribute *attr,
185 char *buf)
186{
187 struct kgsl_pagetable *pt;
188 int ret = 0;
189
190 pt = _get_pt_from_kobj(kobj);
191
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600192 if (pt) {
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600193 ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600194 kgsl_mmu_get_ptsize());
195 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700196
197 kgsl_put_pagetable(pt);
198 return ret;
199}
200
201static ssize_t
202sysfs_show_max_mapped(struct kobject *kobj,
203 struct kobj_attribute *attr,
204 char *buf)
205{
206 struct kgsl_pagetable *pt;
207 int ret = 0;
208
209 pt = _get_pt_from_kobj(kobj);
210
211 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600212 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700213
214 kgsl_put_pagetable(pt);
215 return ret;
216}
217
218static ssize_t
219sysfs_show_max_entries(struct kobject *kobj,
220 struct kobj_attribute *attr,
221 char *buf)
222{
223 struct kgsl_pagetable *pt;
224 int ret = 0;
225
226 pt = _get_pt_from_kobj(kobj);
227
228 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600229 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700230
231 kgsl_put_pagetable(pt);
232 return ret;
233}
234
/*
 * Per-pagetable sysfs attributes.  Each pagetable gets a kobject named
 * after its numeric id (see pagetable_add_sysfs_objects()) carrying
 * these read-only statistics files.
 */
static struct kobj_attribute attr_entries = {
        .attr = { .name = "entries", .mode = 0444 },
        .show = sysfs_show_entries,
        .store = NULL,
};

static struct kobj_attribute attr_mapped = {
        .attr = { .name = "mapped", .mode = 0444 },
        .show = sysfs_show_mapped,
        .store = NULL,
};

static struct kobj_attribute attr_va_range = {
        .attr = { .name = "va_range", .mode = 0444 },
        .show = sysfs_show_va_range,
        .store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
        .attr = { .name = "max_mapped", .mode = 0444 },
        .show = sysfs_show_max_mapped,
        .store = NULL,
};

static struct kobj_attribute attr_max_entries = {
        .attr = { .name = "max_entries", .mode = 0444 },
        .show = sysfs_show_max_entries,
        .store = NULL,
};

/* NULL-terminated attribute list registered as one sysfs group */
static struct attribute *pagetable_attrs[] = {
        &attr_entries.attr,
        &attr_mapped.attr,
        &attr_va_range.attr,
        &attr_max_mapped.attr,
        &attr_max_entries.attr,
        NULL,
};

static struct attribute_group pagetable_attr_group = {
        .attrs = pagetable_attrs,
};
277
278static void
279pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
280{
281 if (pagetable->kobj)
282 sysfs_remove_group(pagetable->kobj,
283 &pagetable_attr_group);
284
285 kobject_put(pagetable->kobj);
286}
287
/*
 * pagetable_add_sysfs_objects - create <ptkobj>/<name>/ sysfs entries
 * @pagetable: pagetable to expose; its numeric name becomes the
 *             directory name parsed back by _get_pt_from_kobj().
 *
 * Returns 0 on success or a negative errno.  On any failure the
 * kobject (if created) is released and pagetable->kobj is left NULL,
 * so pagetable_remove_sysfs_objects() is safe either way.  Note ret
 * starts at -ENOMEM so the kobject-creation failure falls through the
 * shared err: path with the right code.
 */
static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
        char ptname[16];
        int ret = -ENOMEM;

        snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
        pagetable->kobj = kobject_create_and_add(ptname,
                        kgsl_driver.ptkobj);
        if (pagetable->kobj == NULL)
                goto err;

        ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
        if (ret) {
                if (pagetable->kobj)
                        kobject_put(pagetable->kobj);

                pagetable->kobj = NULL;
        }

        return ret;
}
312
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600313unsigned int kgsl_mmu_get_ptsize(void)
314{
315 /*
316 * For IOMMU, we could do up to 4G virtual range if we wanted to, but
317 * it makes more sense to return a smaller range and leave the rest of
318 * the virtual range for future improvements
319 */
320
321 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
322 return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
323 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
Shubhraprakash Das337d6c92012-09-19 16:19:19 -0700324 return SZ_2G - KGSL_PAGETABLE_BASE;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600325 else
326 return 0;
327}
328
/*
 * kgsl_mmu_get_ptname_from_ptbase - map a hardware pagetable base back
 * to the pagetable id that owns it
 * @mmu:     MMU whose mmu_pt_equal op knows the hardware encoding
 * @pt_base: pagetable base address read from the hardware
 *
 * Returns the matching pagetable's name, -1 if nothing matches, or
 * KGSL_MMU_GLOBAL_PT when the MMU has no pt_equal op (e.g. no-MMU
 * configurations, where everything is effectively global).
 */
int
kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu, unsigned int pt_base)
{
        struct kgsl_pagetable *pt;
        int ptid = -1;

        if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal)
                return KGSL_MMU_GLOBAL_PT;
        spin_lock(&kgsl_driver.ptlock);
        list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
                if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
                        ptid = (int) pt->name;
                        break;
                }
        }
        spin_unlock(&kgsl_driver.ptlock);

        return ptid;
}
EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600349
/*
 * kgsl_mmu_log_fault_addr - de-duplicate pagefault logging per page
 * @mmu:     MMU whose mmu_pt_equal op identifies the faulting pagetable
 * @pt_base: hardware pagetable base at the time of the fault
 * @addr:    faulting GPU virtual address
 *
 * Tracks the last faulting page per pagetable (pt->fault_addr, cleared
 * again in kgsl_mmu_unmap()).  Returns 1 when this page already faulted
 * (caller should suppress the log) and 0 when it is new (the page is
 * recorded and the caller should log).
 *
 * NOTE(review): when mmu_pt_equal is absent this returns
 * KGSL_MMU_GLOBAL_PT, mirroring kgsl_mmu_get_ptname_from_ptbase();
 * callers treating the result strictly as 0/1 should confirm that id
 * is the intended value here.
 */
unsigned int
kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, unsigned int pt_base,
                unsigned int addr)
{
        struct kgsl_pagetable *pt;
        unsigned int ret = 0;

        if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal)
                return KGSL_MMU_GLOBAL_PT;
        spin_lock(&kgsl_driver.ptlock);
        list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
                if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
                        /* Compare page-aligned addresses: one log per page */
                        if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
                                ret = 1;
                                break;
                        } else {
                                pt->fault_addr = (addr & ~(PAGE_SIZE-1));
                                ret = 0;
                                break;
                        }

                }
        }
        spin_unlock(&kgsl_driver.ptlock);

        return ret;
}
EXPORT_SYMBOL(kgsl_mmu_log_fault_addr);
378
/*
 * kgsl_mmu_init - one-time MMU setup for a device
 * @device: device whose embedded mmu is initialized
 *
 * Allocates the page-sized setstate scratch buffer, zeroes it, binds
 * the ops table matching the global MMU type, and runs the type-
 * specific init.  For the no-MMU case there is nothing further to do
 * and the function succeeds after logging.  On failure of the
 * type-specific init the setstate buffer is freed again.
 *
 * Returns 0 or a negative errno.
 */
int kgsl_mmu_init(struct kgsl_device *device)
{
        int status = 0;
        struct kgsl_mmu *mmu = &device->mmu;

        mmu->device = device;
        status = kgsl_allocate_contiguous(&mmu->setstate_memory, PAGE_SIZE);
        if (status)
                return status;
        kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
                                mmu->setstate_memory.size);

        if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
                dev_info(device->dev, "|%s| MMU type set for device is "
                                "NOMMU\n", __func__);
                goto done;
        } else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
                mmu->mmu_ops = &gpummu_ops;
        else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
                mmu->mmu_ops = &iommu_ops;

        status = mmu->mmu_ops->mmu_init(mmu);
done:
        /* status is 0 on the NOMMU path, so this frees only on error */
        if (status)
                kgsl_sharedmem_free(&mmu->setstate_memory);
        return status;
}
EXPORT_SYMBOL(kgsl_mmu_init);
407
408int kgsl_mmu_start(struct kgsl_device *device)
409{
410 struct kgsl_mmu *mmu = &device->mmu;
411
412 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
413 kgsl_regwrite(device, MH_MMU_CONFIG, 0);
Shubhraprakash Das6b30c9f2012-04-20 01:15:55 -0600414 /* Setup gpuaddr of global mappings */
415 if (!mmu->setstate_memory.gpuaddr)
416 kgsl_setup_pt(NULL);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600417 return 0;
418 } else {
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600419 return mmu->mmu_ops->mmu_start(mmu);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600420 }
421}
422EXPORT_SYMBOL(kgsl_mmu_start);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600423
/*
 * mh_axi_error - report an AXI read/write error from the memory hub
 * @device: faulting device
 * @type:   "read" or "write", used only in the log message
 *
 * Dumps the AXI error register, the current pagetable base, and the GPU
 * virtual / physical addresses involved.  The 44/45 values written to
 * MH_DEBUG_CTRL select which debug datum MH_DEBUG_DATA returns —
 * presumably hardware-defined selector indices for the faulting GPU and
 * physical addresses (TODO: confirm against the MH register spec).
 */
static void mh_axi_error(struct kgsl_device *device, const char* type)
{
        unsigned int reg, gpu_err, phys_err, pt_base;

        kgsl_regread(device, MH_AXI_ERROR, &reg);
        pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
        /*
         * Read gpu virtual and physical addresses that
         * caused the error from the debug data.
         */
        kgsl_regwrite(device, MH_DEBUG_CTRL, 44);
        kgsl_regread(device, MH_DEBUG_DATA, &gpu_err);
        kgsl_regwrite(device, MH_DEBUG_CTRL, 45);
        kgsl_regread(device, MH_DEBUG_DATA, &phys_err);
        KGSL_MEM_CRIT(device,
                "axi %s error: %08x pt %08x gpu %08x phys %08x\n",
                type, reg, pt_base, gpu_err, phys_err);
}
442
/*
 * kgsl_mh_intrcallback - memory-hub interrupt handler
 * @device: interrupting device
 *
 * Reads the MH interrupt status, dispatches AXI read/write errors to
 * mh_axi_error() and page faults to the MMU's pagefault op, then acks
 * exactly the MMU-related bits (KGSL_MMU_INT_MASK) it handles.
 */
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
        unsigned int status = 0;

        kgsl_regread(device, MH_INTERRUPT_STATUS, &status);

        if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
                mh_axi_error(device, "read");
        if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
                mh_axi_error(device, "write");
        if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
                device->mmu.mmu_ops->mmu_pagefault(&device->mmu);

        status &= KGSL_MMU_INT_MASK;
        kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);
460
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700461static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
462 unsigned int name)
463{
464 int status = 0;
465 struct kgsl_pagetable *pagetable = NULL;
466 unsigned long flags;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600467 unsigned int ptsize;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700468
469 pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
470 if (pagetable == NULL) {
471 KGSL_CORE_ERR("kzalloc(%d) failed\n",
472 sizeof(struct kgsl_pagetable));
473 return NULL;
474 }
475
476 kref_init(&pagetable->refcount);
477
478 spin_lock_init(&pagetable->lock);
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600479
480 ptsize = kgsl_mmu_get_ptsize();
481
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700482 pagetable->name = name;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600483 pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
Rajeev Kulkarni42946992013-02-15 16:45:17 -0800484 pagetable->fault_addr = 0xFFFFFFFF;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700485
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600486 /*
487 * create a separate kgsl pool for IOMMU, global mappings can be mapped
488 * just once from this pool of the defaultpagetable
489 */
490 if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
Shubhraprakash Das19ca4a62012-05-18 12:11:20 -0600491 ((KGSL_MMU_GLOBAL_PT == name) ||
492 (KGSL_MMU_PRIV_BANK_TABLE_NAME == name))) {
493 pagetable->kgsl_pool = gen_pool_create(PAGE_SHIFT, -1);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600494 if (pagetable->kgsl_pool == NULL) {
495 KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
Jeremy Gebbenc589ccb2012-05-16 10:26:20 -0600496 KGSL_MMU_ALIGN_SHIFT);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600497 goto err_alloc;
498 }
499 if (gen_pool_add(pagetable->kgsl_pool,
500 KGSL_IOMMU_GLOBAL_MEM_BASE,
501 KGSL_IOMMU_GLOBAL_MEM_SIZE, -1)) {
502 KGSL_CORE_ERR("gen_pool_add failed\n");
503 goto err_kgsl_pool;
504 }
505 }
506
Jeremy Gebbenc589ccb2012-05-16 10:26:20 -0600507 pagetable->pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT, -1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700508 if (pagetable->pool == NULL) {
Jeremy Gebbenc589ccb2012-05-16 10:26:20 -0600509 KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
510 KGSL_MMU_ALIGN_SHIFT);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600511 goto err_kgsl_pool;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700512 }
513
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600514 if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600515 ptsize, -1)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700516 KGSL_CORE_ERR("gen_pool_add failed\n");
517 goto err_pool;
518 }
519
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600520 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
521 pagetable->pt_ops = &gpummu_pt_ops;
522 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
523 pagetable->pt_ops = &iommu_pt_ops;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700524
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600525 pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
526 if (!pagetable->priv)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700527 goto err_pool;
528
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700529 status = kgsl_setup_pt(pagetable);
530 if (status)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600531 goto err_mmu_create;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700532
533 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
534 list_add(&pagetable->list, &kgsl_driver.pagetable_list);
535 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
536
537 /* Create the sysfs entries */
538 pagetable_add_sysfs_objects(pagetable);
539
540 return pagetable;
541
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600542err_mmu_create:
543 pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700544err_pool:
545 gen_pool_destroy(pagetable->pool);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600546err_kgsl_pool:
547 if (pagetable->kgsl_pool)
548 gen_pool_destroy(pagetable->kgsl_pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700549err_alloc:
550 kfree(pagetable);
551
552 return NULL;
553}
554
/*
 * kgsl_mmu_getpagetable - get (or lazily create) a pagetable by name
 * @name: requested pagetable id
 *
 * Returns a referenced pagetable, NULL on allocation failure, or the
 * sentinel (void *)(-1) when no MMU is in use — callers must treat that
 * value specially, it is not a dereferenceable pagetable.
 *
 * The requested name is coerced to KGSL_MMU_GLOBAL_PT when per-process
 * pagetables are compiled out, and likewise on hardware without
 * IOMMU-v1 (per-process is not supported on IOMMU-v2 here).
 */
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
        struct kgsl_pagetable *pt;

        if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
                return (void *)(-1);

#ifndef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
        name = KGSL_MMU_GLOBAL_PT;
#endif
        /* We presently do not support per-process for IOMMU-v2 */
        if (!msm_soc_version_supports_iommu_v1())
                name = KGSL_MMU_GLOBAL_PT;

        pt = kgsl_get_pagetable(name);

        if (pt == NULL)
                pt = kgsl_mmu_createpagetableobject(name);

        return pt;
}
576
577void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
578{
579 kgsl_put_pagetable(pagetable);
580}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600581EXPORT_SYMBOL(kgsl_mmu_putpagetable);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700582
/*
 * kgsl_setstate - issue an MMU state change (TLB flush / PT switch)
 * @mmu:        MMU to operate on
 * @context_id: draw context the state change applies to
 * @flags:      KGSL_MMUFLAGS_TLBFLUSH and/or KGSL_MMUFLAGS_PTUPDATE
 *
 * No-op when neither flag is set on non-a2xx hardware (a2xx is exempt
 * from that early-out), and when no MMU is in use.  Prefers the
 * device-level setstate op; falls back to the MMU-level one.
 */
void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
                        uint32_t flags)
{
        struct kgsl_device *device = mmu->device;
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

        if (!(flags & (KGSL_MMUFLAGS_TLBFLUSH | KGSL_MMUFLAGS_PTUPDATE))
                && !adreno_is_a2xx(adreno_dev))
                return;

        if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
                return;
        else if (device->ftbl->setstate)
                device->ftbl->setstate(device, context_id, flags);
        else if (mmu->mmu_ops->mmu_device_setstate)
                mmu->mmu_ops->mmu_device_setstate(mmu, flags);
}
EXPORT_SYMBOL(kgsl_setstate);
601
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600602void kgsl_mh_start(struct kgsl_device *device)
603{
604 struct kgsl_mh *mh = &device->mh;
605 /* force mmu off to for now*/
606 kgsl_regwrite(device, MH_MMU_CONFIG, 0);
Jordan Crousea29a2e02012-08-14 09:09:23 -0600607 kgsl_idle(device);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600608
609 /* define physical memory range accessible by the core */
610 kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
611 kgsl_regwrite(device, MH_MMU_MPU_END,
612 mh->mpu_base + mh->mpu_range);
613 kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);
614
615 if (mh->mh_intf_cfg1 != 0)
616 kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
617 mh->mh_intf_cfg1);
618
619 if (mh->mh_intf_cfg2 != 0)
620 kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
621 mh->mh_intf_cfg2);
622
623 /*
624 * Interrupts are enabled on a per-device level when
625 * kgsl_pwrctrl_irq() is called
626 */
627}
628
Jeremy Gebben1b9b1f142012-05-16 10:43:28 -0600629static inline struct gen_pool *
630_get_pool(struct kgsl_pagetable *pagetable, unsigned int flags)
631{
632 if (pagetable->kgsl_pool &&
Jordan Crousedc67dfb2012-10-25 09:41:46 -0600633 (KGSL_MEMDESC_GLOBAL & flags))
Jeremy Gebben1b9b1f142012-05-16 10:43:28 -0600634 return pagetable->kgsl_pool;
635 return pagetable->pool;
636}
637
/*
 * kgsl_mmu_map - allocate GPU VA for a memdesc and map it
 * @pagetable: pagetable to map into
 * @memdesc:   memory descriptor (scatterlist already built)
 * @protflags: protection flags passed through to the pt mmu_map op
 *
 * No-MMU case: the buffer must be one contiguous sg entry; its DMA (or
 * physical) address becomes the gpuaddr directly.  Otherwise a VA range
 * is carved from the appropriate pool, the type-specific map op is run,
 * and the sysfs statistics are updated.
 *
 * Locking is deliberately asymmetric: for non-IOMMU the pagetable lock
 * is taken *before* mmu_map, for IOMMU only *after* it (guarding just
 * the stats update) — presumably because the IOMMU map path cannot run
 * under a spinlock; TODO(review) confirm against the iommu pt_ops.
 * Both branches converge with the lock held and release it on exit.
 *
 * Returns 0, -EINVAL (bad no-MMU buffer), -ENOMEM (VA exhausted), or
 * the mmu_map op's error (VA is released again on that path).
 */
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
                                struct kgsl_memdesc *memdesc,
                                unsigned int protflags)
{
        int ret;
        struct gen_pool *pool;
        int size;
        int page_align = ilog2(PAGE_SIZE);

        if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
                if (memdesc->sglen == 1) {
                        memdesc->gpuaddr = sg_dma_address(memdesc->sg);
                        if (!memdesc->gpuaddr)
                                memdesc->gpuaddr = sg_phys(memdesc->sg);
                        if (!memdesc->gpuaddr) {
                                KGSL_CORE_ERR("Unable to get a valid physical "
                                        "address for memdesc\n");
                                return -EINVAL;
                        }
                        return 0;
                } else {
                        KGSL_CORE_ERR("Memory is not contigious "
                                "(sglen = %d)\n", memdesc->sglen);
                        return -EINVAL;
                }
        }

        size = kgsl_sg_size(memdesc->sg, memdesc->sglen);

        /* Allocate from kgsl pool if it exists for global mappings */
        pool = _get_pool(pagetable, memdesc->priv);

        /* Allocate aligned virtual addresses for iommu. This allows
         * more efficient pagetable entries if the physical memory
         * is also aligned. Don't do this for GPUMMU, because
         * the address space is so small.
         */
        if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype() &&
            kgsl_memdesc_get_align(memdesc) > 0)
                page_align = kgsl_memdesc_get_align(memdesc);

        memdesc->gpuaddr = gen_pool_alloc_aligned(pool, size, page_align);
        if (memdesc->gpuaddr == 0) {
                KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
                        size,
                        (pool == pagetable->kgsl_pool) ?
                        "kgsl_pool" : "general_pool");
                KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
                        pagetable->name, pagetable->stats.mapped,
                        pagetable->stats.entries);
                return -ENOMEM;
        }

        /* See locking note in the header comment */
        if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
                spin_lock(&pagetable->lock);
        ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags,
                                                &pagetable->tlb_flags);
        if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
                spin_lock(&pagetable->lock);

        if (ret)
                goto err_free_gpuaddr;

        /* Keep track of the statistics for the sysfs files */

        KGSL_STATS_ADD(1, pagetable->stats.entries,
                       pagetable->stats.max_entries);

        KGSL_STATS_ADD(size, pagetable->stats.mapped,
                       pagetable->stats.max_mapped);

        spin_unlock(&pagetable->lock);

        return 0;

err_free_gpuaddr:
        spin_unlock(&pagetable->lock);
        gen_pool_free(pool, memdesc->gpuaddr, size);
        memdesc->gpuaddr = 0;
        return ret;
}
EXPORT_SYMBOL(kgsl_mmu_map);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700721
/*
 * kgsl_mmu_unmap - unmap a memdesc and return its GPU VA to the pool
 * @pagetable: pagetable the buffer was mapped into
 * @memdesc:   memory descriptor to unmap
 *
 * No-ops for empty/unmapped descriptors; for no-MMU it just clears the
 * gpuaddr.  Otherwise the type-specific unmap op runs, the recorded
 * fault address is cleared if it fell inside this buffer (so a future
 * fault on a recycled page is logged again), stats are decremented, and
 * the VA range is freed.
 *
 * Locking mirrors kgsl_mmu_map(): non-IOMMU takes the pagetable lock
 * before mmu_unmap, IOMMU only afterwards for the stats update —
 * presumably because the IOMMU unmap path cannot run under a spinlock;
 * TODO(review) confirm.  Note the fault_addr update on the IOMMU path
 * therefore happens outside the lock.
 *
 * gpuaddr is preserved on KGSL_MEMDESC_GLOBAL buffers because the same
 * address is shared by every pagetable.  Always returns 0.
 */
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
                struct kgsl_memdesc *memdesc)
{
        struct gen_pool *pool;
        int size;
        unsigned int start_addr = 0;
        unsigned int end_addr = 0;

        if (memdesc->size == 0 || memdesc->gpuaddr == 0)
                return 0;

        if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
                memdesc->gpuaddr = 0;
                return 0;
        }

        size = kgsl_sg_size(memdesc->sg, memdesc->sglen);

        start_addr = memdesc->gpuaddr;
        end_addr = (memdesc->gpuaddr + size);

        if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
                spin_lock(&pagetable->lock);
        pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc,
                                        &pagetable->tlb_flags);

        /* If buffer is unmapped 0 fault addr */
        if ((pagetable->fault_addr >= start_addr) &&
                (pagetable->fault_addr < end_addr))
                pagetable->fault_addr = 0;

        if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
                spin_lock(&pagetable->lock);
        /* Remove the statistics */
        pagetable->stats.entries--;
        pagetable->stats.mapped -= size;

        spin_unlock(&pagetable->lock);

        pool = _get_pool(pagetable, memdesc->priv);
        gen_pool_free(pool, memdesc->gpuaddr, size);

        /*
         * Don't clear the gpuaddr on global mappings because they
         * may be in use by other pagetables
         */
        if (!(memdesc->priv & KGSL_MEMDESC_GLOBAL))
                memdesc->gpuaddr = 0;
        return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
774
/*
 * kgsl_mmu_map_global - map a buffer that must share one GPU VA across
 * all pagetables
 * @pagetable: pagetable to map into
 * @memdesc:   buffer to mark global and map
 * @protflags: protection flags forwarded to kgsl_mmu_map()
 *
 * Marks the memdesc KGSL_MEMDESC_GLOBAL (so kgsl_mmu_map() allocates
 * from the shared kgsl_pool and unmap keeps the gpuaddr) and verifies
 * the invariant that a previously assigned gpuaddr is reproduced
 * exactly in this pagetable; on mismatch the mapping is undone and
 * -EINVAL-style failure is returned.  Zero-size descriptors succeed
 * trivially (not all global mappings exist for all MMU types).
 */
int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
                        struct kgsl_memdesc *memdesc, unsigned int protflags)
{
        int result = -EINVAL;
        unsigned int gpuaddr = 0;

        if (memdesc == NULL) {
                KGSL_CORE_ERR("invalid memdesc\n");
                goto error;
        }
        /* Not all global mappings are needed for all MMU types */
        if (!memdesc->size)
                return 0;

        gpuaddr = memdesc->gpuaddr;
        memdesc->priv |= KGSL_MEMDESC_GLOBAL;

        result = kgsl_mmu_map(pagetable, memdesc, protflags);
        if (result)
                goto error;

        /*global mappings must have the same gpu address in all pagetables*/
        if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
                KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x"
                        "gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr,
                        gpuaddr, memdesc->gpuaddr);
                goto error_unmap;
        }
        return result;
error_unmap:
        kgsl_mmu_unmap(pagetable, memdesc);
error:
        return result;
}
EXPORT_SYMBOL(kgsl_mmu_map_global);
810
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700811int kgsl_mmu_close(struct kgsl_device *device)
812{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700813 struct kgsl_mmu *mmu = &device->mmu;
814
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600815 kgsl_sharedmem_free(&mmu->setstate_memory);
816 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600817 return 0;
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600818 else
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600819 return mmu->mmu_ops->mmu_close(mmu);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700820}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600821EXPORT_SYMBOL(kgsl_mmu_close);
822
/*
 * kgsl_mmu_pt_get_flags - consume the pending TLB-flush flag for a device
 * @pt: pagetable whose flags are queried (NULL returns 0)
 * @id: device id selecting the bit in pt->tlb_flags
 *
 * Test-and-clear under the pagetable lock: if device @id has a flush
 * pending, returns KGSL_MMUFLAGS_TLBFLUSH and clears the bit so the
 * flush is issued exactly once; otherwise returns 0.
 */
int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
                        enum kgsl_deviceid id)
{
        unsigned int result = 0;

        if (pt == NULL)
                return 0;

        spin_lock(&pt->lock);
        if (pt->tlb_flags & (1<<id)) {
                result = KGSL_MMUFLAGS_TLBFLUSH;
                pt->tlb_flags &= ~(1<<id);
        }
        spin_unlock(&pt->lock);
        return result;
}
EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
840
841void kgsl_mmu_ptpool_destroy(void *ptpool)
842{
843 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
844 kgsl_gpummu_ptpool_destroy(ptpool);
845 ptpool = 0;
846}
847EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
848
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600849void *kgsl_mmu_ptpool_init(int entries)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600850{
851 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600852 return kgsl_gpummu_ptpool_init(entries);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600853 else
854 return (void *)(-1);
855}
856EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
857
858int kgsl_mmu_enabled(void)
859{
860 if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
861 return 1;
862 else
863 return 0;
864}
865EXPORT_SYMBOL(kgsl_mmu_enabled);
866
/* Accessor for the file-scope MMU type chosen by kgsl_mmu_set_mmutype(). */
enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
{
        return kgsl_mmu_type;
}
EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
872
/*
 * kgsl_mmu_set_mmutype - choose the global MMU type at probe time
 * @mmutype: optional override string ("gpummu", "iommu", "nommu"),
 *           typically from a module parameter; NULL keeps the default.
 *
 * Precedence, last writer wins: hardware default (GPU, or NONE on
 * apq8064) -> IOMMU if present -> explicit "gpummu" -> explicit
 * "iommu" (honored only when an IOMMU is actually present) ->
 * explicit "nommu".
 */
void kgsl_mmu_set_mmutype(char *mmutype)
{
        /* Set the default MMU - GPU on <=8960 and nothing on >= 8064 */
        kgsl_mmu_type =
                cpu_is_apq8064() ? KGSL_MMU_TYPE_NONE : KGSL_MMU_TYPE_GPU;

        /* Use the IOMMU if it is found */
        if (iommu_present(&platform_bus_type))
                kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;

        if (mmutype && !strncmp(mmutype, "gpummu", 6))
                kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
        if (iommu_present(&platform_bus_type) && mmutype &&
            !strncmp(mmutype, "iommu", 5))
                kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
        if (mmutype && !strncmp(mmutype, "nommu", 5))
                kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
}
EXPORT_SYMBOL(kgsl_mmu_set_mmutype);
Shubhraprakash Dase7652cf2012-08-11 17:15:19 -0700892
893int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr)
894{
895 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
896 return 1;
897 return ((gpuaddr >= KGSL_PAGETABLE_BASE) &&
898 (gpuaddr < (KGSL_PAGETABLE_BASE + kgsl_mmu_get_ptsize())));
899}
900EXPORT_SYMBOL(kgsl_mmu_gpuaddr_in_range);
901