blob: 36248ef2f757a042b96324bf40e552f3357d80d5 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/types.h>
14#include <linux/device.h>
15#include <linux/spinlock.h>
16#include <linux/genalloc.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060019#include <linux/iommu.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020
21#include "kgsl.h"
22#include "kgsl_mmu.h"
23#include "kgsl_device.h"
24#include "kgsl_sharedmem.h"
Jeremy Gebbena3d07a42011-10-17 12:08:16 -060025#include "adreno_postmortem.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026
27#define KGSL_MMU_ALIGN_SHIFT 13
28#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
29
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060030static enum kgsl_mmutype kgsl_mmu_type;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070031
32static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
33
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070034static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
35{
36 int i;
37 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
38 struct kgsl_device *device = kgsl_driver.devp[i];
39 if (device)
40 device->ftbl->cleanup_pt(device, pt);
41 }
42 return 0;
43}
44
/*
 * kgsl_destroy_pagetable - kref release callback freeing a pagetable.
 *
 * Runs when the last reference is dropped via kgsl_put_pagetable().
 * Unlinks the pagetable from the driver list first (under ptlock) so no
 * new lookup can find the dying object, then removes its sysfs entries,
 * runs per-device cleanup, releases the GPU VA pool, destroys the
 * backend (gpummu/iommu) pagetable memory, and frees the object itself.
 */
static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	/* backend-specific teardown of the actual pagetable memory */
	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);

	kfree(pagetable);
}
66
67static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
68{
69 if (pagetable)
70 kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
71}
72
73static struct kgsl_pagetable *
74kgsl_get_pagetable(unsigned long name)
75{
76 struct kgsl_pagetable *pt, *ret = NULL;
77 unsigned long flags;
78
79 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
80 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
81 if (pt->name == name) {
82 ret = pt;
83 kref_get(&ret->refcount);
84 break;
85 }
86 }
87
88 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
89 return ret;
90}
91
92static struct kgsl_pagetable *
93_get_pt_from_kobj(struct kobject *kobj)
94{
95 unsigned long ptname;
96
97 if (!kobj)
98 return NULL;
99
100 if (sscanf(kobj->name, "%ld", &ptname) != 1)
101 return NULL;
102
103 return kgsl_get_pagetable(ptname);
104}
105
106static ssize_t
107sysfs_show_entries(struct kobject *kobj,
108 struct kobj_attribute *attr,
109 char *buf)
110{
111 struct kgsl_pagetable *pt;
112 int ret = 0;
113
114 pt = _get_pt_from_kobj(kobj);
115
116 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600117 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700118
119 kgsl_put_pagetable(pt);
120 return ret;
121}
122
123static ssize_t
124sysfs_show_mapped(struct kobject *kobj,
125 struct kobj_attribute *attr,
126 char *buf)
127{
128 struct kgsl_pagetable *pt;
129 int ret = 0;
130
131 pt = _get_pt_from_kobj(kobj);
132
133 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600134 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700135
136 kgsl_put_pagetable(pt);
137 return ret;
138}
139
140static ssize_t
141sysfs_show_va_range(struct kobject *kobj,
142 struct kobj_attribute *attr,
143 char *buf)
144{
145 struct kgsl_pagetable *pt;
146 int ret = 0;
147
148 pt = _get_pt_from_kobj(kobj);
149
150 if (pt)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600151 ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
152 CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700153
154 kgsl_put_pagetable(pt);
155 return ret;
156}
157
158static ssize_t
159sysfs_show_max_mapped(struct kobject *kobj,
160 struct kobj_attribute *attr,
161 char *buf)
162{
163 struct kgsl_pagetable *pt;
164 int ret = 0;
165
166 pt = _get_pt_from_kobj(kobj);
167
168 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600169 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700170
171 kgsl_put_pagetable(pt);
172 return ret;
173}
174
175static ssize_t
176sysfs_show_max_entries(struct kobject *kobj,
177 struct kobj_attribute *attr,
178 char *buf)
179{
180 struct kgsl_pagetable *pt;
181 int ret = 0;
182
183 pt = _get_pt_from_kobj(kobj);
184
185 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600186 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700187
188 kgsl_put_pagetable(pt);
189 return ret;
190}
191
/*
 * Read-only sysfs attributes exposed for every pagetable under
 * /sys/.../<ptname>/ — one per statistic shown by the handlers above.
 */
static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

/* NULL-terminated list consumed by sysfs_create_group()/remove_group() */
static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};
234
235static void
236pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
237{
238 if (pagetable->kobj)
239 sysfs_remove_group(pagetable->kobj,
240 &pagetable_attr_group);
241
242 kobject_put(pagetable->kobj);
243}
244
245static int
246pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
247{
248 char ptname[16];
249 int ret = -ENOMEM;
250
251 snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
252 pagetable->kobj = kobject_create_and_add(ptname,
253 kgsl_driver.ptkobj);
254 if (pagetable->kobj == NULL)
255 goto err;
256
257 ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);
258
259err:
260 if (ret) {
261 if (pagetable->kobj)
262 kobject_put(pagetable->kobj);
263
264 pagetable->kobj = NULL;
265 }
266
267 return ret;
268}
269
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600270unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700271{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600272 struct kgsl_mmu *mmu = &device->mmu;
273 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
274 return 0;
275 else
276 return mmu->mmu_ops->mmu_get_current_ptbase(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700277}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600278EXPORT_SYMBOL(kgsl_mmu_get_current_ptbase);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700279
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600280int
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600281kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base)
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600282{
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600283 struct kgsl_pagetable *pt;
284 int ptid = -1;
285
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600286 spin_lock(&kgsl_driver.ptlock);
287 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600288 if (pt->pt_ops->mmu_pt_equal(pt, pt_base)) {
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600289 ptid = (int) pt->name;
290 break;
291 }
292 }
293 spin_unlock(&kgsl_driver.ptlock);
294
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600295 return ptid;
296}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600297EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600298
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600299void kgsl_mmu_setstate(struct kgsl_device *device,
300 struct kgsl_pagetable *pagetable)
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600301{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600302 struct kgsl_mmu *mmu = &device->mmu;
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600303
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600304 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
305 return;
306 else
307 mmu->mmu_ops->mmu_setstate(device,
308 pagetable);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600309}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600310EXPORT_SYMBOL(kgsl_mmu_setstate);
311
312int kgsl_mmu_init(struct kgsl_device *device)
313{
314 struct kgsl_mmu *mmu = &device->mmu;
315
316 mmu->device = device;
317
318 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
319 dev_info(device->dev, "|%s| MMU type set for device is "
320 "NOMMU\n", __func__);
321 return 0;
322 } else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
323 mmu->mmu_ops = &gpummu_ops;
324 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
325 mmu->mmu_ops = &iommu_ops;
326
327 return mmu->mmu_ops->mmu_init(device);
328}
329EXPORT_SYMBOL(kgsl_mmu_init);
330
331int kgsl_mmu_start(struct kgsl_device *device)
332{
333 struct kgsl_mmu *mmu = &device->mmu;
334
335 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
336 kgsl_regwrite(device, MH_MMU_CONFIG, 0);
337 return 0;
338 } else {
339 return mmu->mmu_ops->mmu_start(device);
340 }
341}
342EXPORT_SYMBOL(kgsl_mmu_start);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600343
/*
 * kgsl_mh_intrcallback - memory hub (MH) interrupt handler.
 *
 * Logs AXI read/write error interrupts (MH_AXI_ERROR carries the detail
 * value for both), forwards MMU page faults to the backend's pagefault
 * handler, then acknowledges the serviced bits via MH_INTERRUPT_CLEAR.
 */
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int reg;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
	kgsl_regread(device, MH_AXI_ERROR, &reg);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		device->mmu.mmu_ops->mmu_pagefault(device);

	/* only ack the interrupt bits this handler is responsible for */
	status &= KGSL_MMU_INT_MASK;
	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);
363
/*
 * kgsl_setup_pt - run every registered device's per-pagetable setup hook.
 *
 * On failure, unwinds by calling cleanup_pt for devices 0..i, which
 * includes the device whose setup_pt just failed.
 * NOTE(review): this presumably relies on cleanup_pt tolerating a
 * partially set up pagetable — confirm against the device callbacks.
 * Returns 0 on success or the first failing status.
 */
static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	return status;
error_pt:
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}
387
388static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
389 unsigned int name)
390{
391 int status = 0;
392 struct kgsl_pagetable *pagetable = NULL;
393 unsigned long flags;
394
395 pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
396 if (pagetable == NULL) {
397 KGSL_CORE_ERR("kzalloc(%d) failed\n",
398 sizeof(struct kgsl_pagetable));
399 return NULL;
400 }
401
402 kref_init(&pagetable->refcount);
403
404 spin_lock_init(&pagetable->lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700405 pagetable->name = name;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600406 pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(
407 CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700408
409 pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
410 if (pagetable->pool == NULL) {
411 KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600412 goto err_alloc;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700413 }
414
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600415 if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
416 CONFIG_MSM_KGSL_PAGE_TABLE_SIZE, -1)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700417 KGSL_CORE_ERR("gen_pool_add failed\n");
418 goto err_pool;
419 }
420
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600421 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
422 pagetable->pt_ops = &gpummu_pt_ops;
423 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
424 pagetable->pt_ops = &iommu_pt_ops;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700425
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600426 pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
427 if (!pagetable->priv)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700428 goto err_pool;
429
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700430 status = kgsl_setup_pt(pagetable);
431 if (status)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600432 goto err_mmu_create;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700433
434 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
435 list_add(&pagetable->list, &kgsl_driver.pagetable_list);
436 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
437
438 /* Create the sysfs entries */
439 pagetable_add_sysfs_objects(pagetable);
440
441 return pagetable;
442
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600443err_mmu_create:
444 pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700445err_pool:
446 gen_pool_destroy(pagetable->pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700447err_alloc:
448 kfree(pagetable);
449
450 return NULL;
451}
452
453struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
454{
455 struct kgsl_pagetable *pt;
456
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600457 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
458 return (void *)(-1);
459
460#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
461 if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
462 name = KGSL_MMU_GLOBAL_PT;
463#else
464 name = KGSL_MMU_GLOBAL_PT;
465#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700466 pt = kgsl_get_pagetable(name);
467
468 if (pt == NULL)
469 pt = kgsl_mmu_createpagetableobject(name);
470
471 return pt;
472}
473
/* Release a reference obtained from kgsl_mmu_getpagetable(). */
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700479
480void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
481{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600482 struct kgsl_mmu *mmu = &device->mmu;
483 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
484 return;
485 else if (device->ftbl->setstate)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700486 device->ftbl->setstate(device, flags);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600487 else if (mmu->mmu_ops->mmu_device_setstate)
488 mmu->mmu_ops->mmu_device_setstate(device, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700489}
490EXPORT_SYMBOL(kgsl_setstate);
491
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600492void kgsl_mmu_device_setstate(struct kgsl_device *device, uint32_t flags)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700493{
494 struct kgsl_mmu *mmu = &device->mmu;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600495 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
496 return;
497 else if (mmu->mmu_ops->mmu_device_setstate)
498 mmu->mmu_ops->mmu_device_setstate(device, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700499}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600500EXPORT_SYMBOL(kgsl_mmu_device_setstate);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700501
/*
 * kgsl_mh_start - program the memory hub (MH) registers for a device.
 *
 * Disables the hardware MMU, waits for idle, then programs the MPU
 * physical address window and arbiter/client-interface settings from
 * the per-device kgsl_mh configuration.
 */
void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force mmu off to for now*/
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
			mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	/* interface configs are only written when the platform supplied one */
	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
			mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
			mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}
528
/*
 * kgsl_mmu_map - map a memdesc into a pagetable.
 *
 * In NOMMU mode the GPU address is simply the physical address.
 * Otherwise a GPU virtual range is carved from the pagetable's pool
 * (KGSL_MMU_ALIGN_SHIFT-aligned), the backend map op is invoked under
 * the pagetable lock, and the sysfs statistics are updated.  On backend
 * failure the pool range is returned and gpuaddr reset to 0.
 * Returns 0 on success or a negative error code.
 */
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
				struct kgsl_memdesc *memdesc,
				unsigned int protflags)
{
	int ret;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		memdesc->gpuaddr = memdesc->physaddr;
		return 0;
	}
	memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
		memdesc->size, KGSL_MMU_ALIGN_SHIFT);

	if (memdesc->gpuaddr == 0) {
		KGSL_CORE_ERR("gen_pool_alloc(%d) failed\n", memdesc->size);
		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
				pagetable->name, pagetable->stats.mapped,
				pagetable->stats.entries);
		return -ENOMEM;
	}

	spin_lock(&pagetable->lock);
	ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags);

	if (ret)
		goto err_free_gpuaddr;

	/* Keep track of the statistics for the sysfs files */

	KGSL_STATS_ADD(1, pagetable->stats.entries,
		       pagetable->stats.max_entries);

	KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
		       pagetable->stats.max_mapped);

	spin_unlock(&pagetable->lock);

	return 0;

err_free_gpuaddr:
	/* drop the lock before touching the pool; the pool has its own lock */
	spin_unlock(&pagetable->lock);
	gen_pool_free(pagetable->pool, memdesc->gpuaddr, memdesc->size);
	memdesc->gpuaddr = 0;
	return ret;
}
EXPORT_SYMBOL(kgsl_mmu_map);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700576
/*
 * kgsl_mmu_unmap - remove a memdesc's mapping from a pagetable.
 *
 * No-op for empty or never-mapped descriptors.  In NOMMU mode just
 * clears gpuaddr.  Otherwise invokes the backend unmap op under the
 * pagetable lock, updates the sysfs statistics, and returns the GPU
 * virtual range to the pool (masking recovers the aligned pool address
 * that was handed out at map time).  Always returns 0.
 */
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
		return 0;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		memdesc->gpuaddr = 0;
		return 0;
	}
	spin_lock(&pagetable->lock);
	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= memdesc->size;

	spin_unlock(&pagetable->lock);

	gen_pool_free(pagetable->pool,
			memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK,
			memdesc->size);

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
603
604int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
605 struct kgsl_memdesc *memdesc, unsigned int protflags)
606{
607 int result = -EINVAL;
608 unsigned int gpuaddr = 0;
609
610 if (memdesc == NULL) {
611 KGSL_CORE_ERR("invalid memdesc\n");
612 goto error;
613 }
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600614 /* Not all global mappings are needed for all MMU types */
615 if (!memdesc->size)
616 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700617
618 gpuaddr = memdesc->gpuaddr;
619
620 result = kgsl_mmu_map(pagetable, memdesc, protflags);
621 if (result)
622 goto error;
623
624 /*global mappings must have the same gpu address in all pagetables*/
625 if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
626 KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x"
627 "gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr,
628 gpuaddr, memdesc->gpuaddr);
629 goto error_unmap;
630 }
631 return result;
632error_unmap:
633 kgsl_mmu_unmap(pagetable, memdesc);
634error:
635 return result;
636}
637EXPORT_SYMBOL(kgsl_mmu_map_global);
638
639int kgsl_mmu_stop(struct kgsl_device *device)
640{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700641 struct kgsl_mmu *mmu = &device->mmu;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600642
643 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
644 return 0;
645 else
646 return mmu->mmu_ops->mmu_stop(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700647}
648EXPORT_SYMBOL(kgsl_mmu_stop);
649
650int kgsl_mmu_close(struct kgsl_device *device)
651{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700652 struct kgsl_mmu *mmu = &device->mmu;
653
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600654 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
655 return 0;
656 else
657 return mmu->mmu_ops->mmu_close(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700658}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600659EXPORT_SYMBOL(kgsl_mmu_close);
660
661int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
662 enum kgsl_deviceid id)
663{
664 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
665 return pt->pt_ops->mmu_pt_get_flags(pt, id);
666 else
667 return 0;
668}
669EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
670
671void kgsl_mmu_ptpool_destroy(void *ptpool)
672{
673 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
674 kgsl_gpummu_ptpool_destroy(ptpool);
675 ptpool = 0;
676}
677EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
678
679void *kgsl_mmu_ptpool_init(int ptsize, int entries)
680{
681 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
682 return kgsl_gpummu_ptpool_init(ptsize, entries);
683 else
684 return (void *)(-1);
685}
686EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
687
688int kgsl_mmu_enabled(void)
689{
690 if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
691 return 1;
692 else
693 return 0;
694}
695EXPORT_SYMBOL(kgsl_mmu_enabled);
696
697int kgsl_mmu_pt_equal(struct kgsl_pagetable *pt,
698 unsigned int pt_base)
699{
700 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
701 return true;
702 else
703 return pt->pt_ops->mmu_pt_equal(pt, pt_base);
704}
705EXPORT_SYMBOL(kgsl_mmu_pt_equal);
706
707enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
708{
709 return kgsl_mmu_type;
710}
711EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
712
713void kgsl_mmu_set_mmutype(char *mmutype)
714{
Jeremy Gebben32660362011-11-03 09:59:51 -0600715 kgsl_mmu_type = iommu_found() ? KGSL_MMU_TYPE_IOMMU : KGSL_MMU_TYPE_GPU;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600716 if (mmutype && !strncmp(mmutype, "gpummu", 6))
717 kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
718 if (iommu_found() && mmutype && !strncmp(mmutype, "iommu", 5))
719 kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
720 if (mmutype && !strncmp(mmutype, "nommu", 5))
721 kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
722}
723EXPORT_SYMBOL(kgsl_mmu_set_mmutype);