blob: 2c63bb6e4eb4f2b4858c999377ae432fc5680964 [file] [log] [blame]
Jordan Crouse00714012012-03-16 14:53:40 -06001/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/types.h>
14#include <linux/device.h>
15#include <linux/spinlock.h>
16#include <linux/genalloc.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060019#include <linux/iommu.h>
Jordan Crouse817e0b92012-02-04 10:23:53 -070020#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021
22#include "kgsl.h"
23#include "kgsl_mmu.h"
24#include "kgsl_device.h"
25#include "kgsl_sharedmem.h"
Jeremy Gebbena3d07a42011-10-17 12:08:16 -060026#include "adreno_postmortem.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027
28#define KGSL_MMU_ALIGN_SHIFT 13
29#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
30
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060031static enum kgsl_mmutype kgsl_mmu_type;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070032
33static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
34
/*
 * kgsl_cleanup_pt - run every registered device's cleanup_pt callback
 * for @pt.
 *
 * For IOMMU only the global pagetable carries the globally mapped
 * structures, so all other pagetables are skipped.  Always returns 0.
 */
static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
	int i;
	/* For IOMMU only unmap the global structures to global pt */
	if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
		(KGSL_MMU_GLOBAL_PT != pt->name))
		return 0;
	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
	}
	return 0;
}
49
/*
 * kgsl_destroy_pagetable - kref release callback for a pagetable.
 *
 * Unlinks the pagetable from the driver list, removes its sysfs
 * entries, runs per-device cleanup, destroys the address pools and the
 * backend pagetable, then frees the object itself.
 */
static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	/* kgsl_pool only exists for the IOMMU global pagetable */
	if (pagetable->kgsl_pool)
		gen_pool_destroy(pagetable->kgsl_pool);
	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);

	kfree(pagetable);
}
73
74static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
75{
76 if (pagetable)
77 kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
78}
79
/*
 * kgsl_get_pagetable - look up a pagetable by name.
 *
 * Returns the pagetable with a reference held, or NULL when no
 * pagetable named @name is registered.  The caller must drop the
 * reference with kgsl_put_pagetable().
 */
static struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->name == name) {
			ret = pt;
			kref_get(&ret->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
	return ret;
}
98
99static struct kgsl_pagetable *
100_get_pt_from_kobj(struct kobject *kobj)
101{
102 unsigned long ptname;
103
104 if (!kobj)
105 return NULL;
106
107 if (sscanf(kobj->name, "%ld", &ptname) != 1)
108 return NULL;
109
110 return kgsl_get_pagetable(ptname);
111}
112
113static ssize_t
114sysfs_show_entries(struct kobject *kobj,
115 struct kobj_attribute *attr,
116 char *buf)
117{
118 struct kgsl_pagetable *pt;
119 int ret = 0;
120
121 pt = _get_pt_from_kobj(kobj);
122
123 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600124 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700125
126 kgsl_put_pagetable(pt);
127 return ret;
128}
129
130static ssize_t
131sysfs_show_mapped(struct kobject *kobj,
132 struct kobj_attribute *attr,
133 char *buf)
134{
135 struct kgsl_pagetable *pt;
136 int ret = 0;
137
138 pt = _get_pt_from_kobj(kobj);
139
140 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600141 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700142
143 kgsl_put_pagetable(pt);
144 return ret;
145}
146
147static ssize_t
148sysfs_show_va_range(struct kobject *kobj,
149 struct kobj_attribute *attr,
150 char *buf)
151{
152 struct kgsl_pagetable *pt;
153 int ret = 0;
154
155 pt = _get_pt_from_kobj(kobj);
156
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600157 if (pt) {
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600158 ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600159 kgsl_mmu_get_ptsize());
160 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700161
162 kgsl_put_pagetable(pt);
163 return ret;
164}
165
166static ssize_t
167sysfs_show_max_mapped(struct kobject *kobj,
168 struct kobj_attribute *attr,
169 char *buf)
170{
171 struct kgsl_pagetable *pt;
172 int ret = 0;
173
174 pt = _get_pt_from_kobj(kobj);
175
176 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600177 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178
179 kgsl_put_pagetable(pt);
180 return ret;
181}
182
183static ssize_t
184sysfs_show_max_entries(struct kobject *kobj,
185 struct kobj_attribute *attr,
186 char *buf)
187{
188 struct kgsl_pagetable *pt;
189 int ret = 0;
190
191 pt = _get_pt_from_kobj(kobj);
192
193 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600194 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700195
196 kgsl_put_pagetable(pt);
197 return ret;
198}
199
/* Read-only sysfs attributes exposing the per-pagetable statistics,
 * grouped so they can be created/removed as a unit. */
static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};
242
/*
 * pagetable_remove_sysfs_objects - remove the pagetable's sysfs
 * attribute group and release its kobject.  kobject_put(NULL) is a
 * no-op, so this is safe even when the kobject was never created.
 */
static void
pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	if (pagetable->kobj)
		sysfs_remove_group(pagetable->kobj,
				&pagetable_attr_group);

	kobject_put(pagetable->kobj);
}
252
/*
 * pagetable_add_sysfs_objects - create the per-pagetable sysfs
 * directory (named after the pagetable id) with the statistics
 * attribute group.
 *
 * On any failure the kobject is released and pagetable->kobj is left
 * NULL.  Returns 0 on success or a negative errno.
 */
static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
			kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}

	return ret;
}
277
/*
 * kgsl_mmu_get_ptsize - size in bytes of a pagetable's GPU virtual
 * range: the configured size for the GPU MMU, 2G for IOMMU, 0 when no
 * MMU is in use.
 */
unsigned int kgsl_mmu_get_ptsize(void)
{
	/*
	 * For IOMMU, we could do up to 4G virtual range if we wanted to, but
	 * it makes more sense to return a smaller range and leave the rest of
	 * the virtual range for future improvements
	 */

	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		return SZ_2G;
	else
		return 0;
}
293
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600294unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700295{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600296 struct kgsl_mmu *mmu = &device->mmu;
297 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
298 return 0;
299 else
300 return mmu->mmu_ops->mmu_get_current_ptbase(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700301}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600302EXPORT_SYMBOL(kgsl_mmu_get_current_ptbase);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700303
/*
 * kgsl_mmu_get_ptname_from_ptbase - map a hardware pagetable base
 * value back to the owning pagetable's name.
 *
 * Returns the matching pagetable's name, or -1 if no registered
 * pagetable matches @pt_base.
 */
int
kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base)
{
	struct kgsl_pagetable *pt;
	int ptid = -1;

	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->pt_ops->mmu_pt_equal(pt, pt_base)) {
			ptid = (int) pt->name;
			break;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ptid;
}
EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600322
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600323void kgsl_mmu_setstate(struct kgsl_device *device,
324 struct kgsl_pagetable *pagetable)
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600325{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600326 struct kgsl_mmu *mmu = &device->mmu;
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600327
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600328 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
329 return;
330 else
331 mmu->mmu_ops->mmu_setstate(device,
332 pagetable);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600333}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600334EXPORT_SYMBOL(kgsl_mmu_setstate);
335
/*
 * kgsl_mmu_init - bind the MMU ops for @device according to the
 * globally selected MMU type and run the backend's init.
 *
 * For NOMMU there is nothing to set up and 0 is returned immediately.
 * Returns the backend's init result otherwise.
 */
int kgsl_mmu_init(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
		dev_info(device->dev, "|%s| MMU type set for device is "
			"NOMMU\n", __func__);
		return 0;
	} else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		mmu->mmu_ops = &gpummu_ops;
	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		mmu->mmu_ops = &iommu_ops;

	return mmu->mmu_ops->mmu_init(device);
}
EXPORT_SYMBOL(kgsl_mmu_init);
354
355int kgsl_mmu_start(struct kgsl_device *device)
356{
357 struct kgsl_mmu *mmu = &device->mmu;
358
359 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
360 kgsl_regwrite(device, MH_MMU_CONFIG, 0);
361 return 0;
362 } else {
363 return mmu->mmu_ops->mmu_start(device);
364 }
365}
366EXPORT_SYMBOL(kgsl_mmu_start);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600367
/*
 * kgsl_mh_intrcallback - service memory-hub interrupts: log AXI
 * read/write errors, dispatch MMU page faults to the backend handler,
 * then acknowledge the serviced bits.
 */
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int reg;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
	kgsl_regread(device, MH_AXI_ERROR, &reg);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		device->mmu.mmu_ops->mmu_pagefault(device);

	/* Only clear the bits this handler actually services */
	status &= KGSL_MMU_INT_MASK;
	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);
387
/*
 * kgsl_setup_pt - run each registered device's setup_pt callback for
 * @pt.  For IOMMU only the global pagetable receives the global
 * structure mappings.  On failure the devices set up so far are
 * cleaned up and the error is returned.
 */
static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;

	/* For IOMMU only map the global structures to global pt */
	if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
		(KGSL_MMU_GLOBAL_PT != pt->name))
		return 0;
	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	return status;
error_pt:
	/*
	 * NOTE(review): the unwind starts at index i, so cleanup_pt is
	 * also invoked for the device whose setup just failed —
	 * presumably cleanup_pt tolerates a partially set up
	 * pagetable; verify against the per-device implementations.
	 */
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}
415
416static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
417 unsigned int name)
418{
419 int status = 0;
420 struct kgsl_pagetable *pagetable = NULL;
421 unsigned long flags;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600422 unsigned int ptsize;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700423
424 pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
425 if (pagetable == NULL) {
426 KGSL_CORE_ERR("kzalloc(%d) failed\n",
427 sizeof(struct kgsl_pagetable));
428 return NULL;
429 }
430
431 kref_init(&pagetable->refcount);
432
433 spin_lock_init(&pagetable->lock);
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600434
435 ptsize = kgsl_mmu_get_ptsize();
436
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700437 pagetable->name = name;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600438 pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700439
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600440 /*
441 * create a separate kgsl pool for IOMMU, global mappings can be mapped
442 * just once from this pool of the defaultpagetable
443 */
444 if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
445 (KGSL_MMU_GLOBAL_PT == name)) {
446 pagetable->kgsl_pool = gen_pool_create(PAGE_SHIFT, -1);
447 if (pagetable->kgsl_pool == NULL) {
448 KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
449 PAGE_SHIFT);
450 goto err_alloc;
451 }
452 if (gen_pool_add(pagetable->kgsl_pool,
453 KGSL_IOMMU_GLOBAL_MEM_BASE,
454 KGSL_IOMMU_GLOBAL_MEM_SIZE, -1)) {
455 KGSL_CORE_ERR("gen_pool_add failed\n");
456 goto err_kgsl_pool;
457 }
458 }
459
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700460 pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
461 if (pagetable->pool == NULL) {
462 KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600463 goto err_kgsl_pool;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700464 }
465
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600466 if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600467 ptsize, -1)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700468 KGSL_CORE_ERR("gen_pool_add failed\n");
469 goto err_pool;
470 }
471
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600472 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
473 pagetable->pt_ops = &gpummu_pt_ops;
474 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
475 pagetable->pt_ops = &iommu_pt_ops;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700476
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600477 pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
478 if (!pagetable->priv)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700479 goto err_pool;
480
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700481 status = kgsl_setup_pt(pagetable);
482 if (status)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600483 goto err_mmu_create;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700484
485 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
486 list_add(&pagetable->list, &kgsl_driver.pagetable_list);
487 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
488
489 /* Create the sysfs entries */
490 pagetable_add_sysfs_objects(pagetable);
491
492 return pagetable;
493
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600494err_mmu_create:
495 pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700496err_pool:
497 gen_pool_destroy(pagetable->pool);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600498err_kgsl_pool:
499 if (pagetable->kgsl_pool)
500 gen_pool_destroy(pagetable->kgsl_pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700501err_alloc:
502 kfree(pagetable);
503
504 return NULL;
505}
506
/*
 * kgsl_mmu_getpagetable - return the pagetable named @name with a
 * reference held, creating it on first use.
 *
 * Without an MMU a (void *)(-1) sentinel stands in for a pagetable.
 * IOMMU always collapses onto KGSL_MMU_GLOBAL_PT here; with
 * CONFIG_KGSL_PER_PROCESS_PAGE_TABLE disabled, every request does.
 */
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return (void *)(-1);

#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		name = KGSL_MMU_GLOBAL_PT;
#else
	name = KGSL_MMU_GLOBAL_PT;
#endif
	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}
527
/*
 * kgsl_mmu_putpagetable - release a reference obtained from
 * kgsl_mmu_getpagetable().
 *
 * NOTE(review): the NOMMU sentinel (void *)(-1) returned by
 * kgsl_mmu_getpagetable() is non-NULL and would be kref_put here —
 * verify callers never pass it in the NOMMU configuration.
 */
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700533
/*
 * kgsl_setstate - request an MMU state change, preferring the
 * device-level setstate hook and falling back to the MMU backend's
 * device_setstate.  No-op when no MMU is in use.
 */
void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
{
	struct kgsl_mmu *mmu = &device->mmu;
	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return;
	else if (device->ftbl->setstate)
		device->ftbl->setstate(device, flags);
	else if (mmu->mmu_ops->mmu_device_setstate)
		mmu->mmu_ops->mmu_device_setstate(device, flags);
}
EXPORT_SYMBOL(kgsl_setstate);
545
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600546void kgsl_mmu_device_setstate(struct kgsl_device *device, uint32_t flags)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700547{
548 struct kgsl_mmu *mmu = &device->mmu;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600549 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
550 return;
551 else if (mmu->mmu_ops->mmu_device_setstate)
552 mmu->mmu_ops->mmu_device_setstate(device, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700553}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600554EXPORT_SYMBOL(kgsl_mmu_device_setstate);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700555
/*
 * kgsl_mh_start - program the memory-hub registers for @device: MMU
 * forced off, MPU-accessible address range, arbiter configuration and
 * the optional client-interface configs.  Interrupt enabling happens
 * later via kgsl_pwrctrl_irq().
 */
void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force mmu off to for now*/
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
			mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
				mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
				mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}
582
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700583int
584kgsl_mmu_map(struct kgsl_pagetable *pagetable,
585 struct kgsl_memdesc *memdesc,
586 unsigned int protflags)
587{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600588 int ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700589
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600590 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
Jordan Crouse40861a42012-02-06 10:18:23 -0700591 if (memdesc->sglen == 1) {
Shubhraprakash Das4d6af2b2012-04-20 00:35:03 -0600592 memdesc->gpuaddr = sg_dma_address(memdesc->sg);
593 if (!memdesc->gpuaddr)
594 memdesc->gpuaddr = sg_phys(memdesc->sg);
595 if (!memdesc->gpuaddr) {
596 KGSL_CORE_ERR("Unable to get a valid physical "
597 "address for memdesc\n");
598 return -EINVAL;
599 }
Jordan Crouse40861a42012-02-06 10:18:23 -0700600 return 0;
601 } else {
602 KGSL_CORE_ERR("Memory is not contigious "
603 "(sglen = %d)\n", memdesc->sglen);
604 return -EINVAL;
605 }
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600606 }
Jordan Crouse40861a42012-02-06 10:18:23 -0700607
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600608 /* Allocate from kgsl pool if it exists for global mappings */
609 if (pagetable->kgsl_pool &&
610 (KGSL_MEMFLAGS_GLOBAL & memdesc->priv))
611 memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->kgsl_pool,
612 memdesc->size, KGSL_MMU_ALIGN_SHIFT);
613 else
614 memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
615 memdesc->size, KGSL_MMU_ALIGN_SHIFT);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700616
617 if (memdesc->gpuaddr == 0) {
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600618 KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
619 memdesc->size,
620 ((pagetable->kgsl_pool &&
621 (KGSL_MEMFLAGS_GLOBAL & memdesc->priv)) ?
622 "kgsl_pool" : "general_pool"));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700623 KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
624 pagetable->name, pagetable->stats.mapped,
625 pagetable->stats.entries);
626 return -ENOMEM;
627 }
628
Shubhraprakash Dasbadaeda2012-03-21 00:31:39 -0600629 if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
630 spin_lock(&pagetable->lock);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600631 ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags);
Shubhraprakash Dasbadaeda2012-03-21 00:31:39 -0600632 if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
633 spin_lock(&pagetable->lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700634
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600635 if (ret)
636 goto err_free_gpuaddr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700637
638 /* Keep track of the statistics for the sysfs files */
639
640 KGSL_STATS_ADD(1, pagetable->stats.entries,
641 pagetable->stats.max_entries);
642
643 KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
644 pagetable->stats.max_mapped);
645
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700646 spin_unlock(&pagetable->lock);
647
648 return 0;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600649
650err_free_gpuaddr:
651 spin_unlock(&pagetable->lock);
652 gen_pool_free(pagetable->pool, memdesc->gpuaddr, memdesc->size);
653 memdesc->gpuaddr = 0;
654 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700655}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600656EXPORT_SYMBOL(kgsl_mmu_map);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700657
/*
 * kgsl_mmu_unmap - remove @memdesc's mapping from @pagetable and
 * return its address range to the pool it was allocated from.
 *
 * Global mappings keep their gpuaddr since other pagetables may still
 * reference the same address.  Returns 0 always.
 */
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	/* Nothing was mapped: nothing to undo */
	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
		return 0;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		memdesc->gpuaddr = 0;
		return 0;
	}
	/*
	 * The lock is taken after the backend unmap for IOMMU but
	 * before it otherwise — presumably because the IOMMU path can
	 * sleep; verify against the backend implementations.
	 */
	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= memdesc->size;

	spin_unlock(&pagetable->lock);

	/* Global mappings were carved out of the dedicated kgsl pool */
	if (pagetable->kgsl_pool &&
		(KGSL_MEMFLAGS_GLOBAL & memdesc->priv))
		gen_pool_free(pagetable->kgsl_pool,
			memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK,
			memdesc->size);
	else
		gen_pool_free(pagetable->pool,
			memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK,
			memdesc->size);

	/*
	 * Don't clear the gpuaddr on global mappings because they
	 * may be in use by other pagetables
	 */
	if (!(memdesc->priv & KGSL_MEMFLAGS_GLOBAL))
		memdesc->gpuaddr = 0;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
699
700int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
701 struct kgsl_memdesc *memdesc, unsigned int protflags)
702{
703 int result = -EINVAL;
704 unsigned int gpuaddr = 0;
705
706 if (memdesc == NULL) {
707 KGSL_CORE_ERR("invalid memdesc\n");
708 goto error;
709 }
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600710 /* Not all global mappings are needed for all MMU types */
711 if (!memdesc->size)
712 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700713
714 gpuaddr = memdesc->gpuaddr;
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600715 memdesc->priv |= KGSL_MEMFLAGS_GLOBAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700716
717 result = kgsl_mmu_map(pagetable, memdesc, protflags);
718 if (result)
719 goto error;
720
721 /*global mappings must have the same gpu address in all pagetables*/
722 if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
723 KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x"
724 "gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr,
725 gpuaddr, memdesc->gpuaddr);
726 goto error_unmap;
727 }
728 return result;
729error_unmap:
730 kgsl_mmu_unmap(pagetable, memdesc);
731error:
732 return result;
733}
734EXPORT_SYMBOL(kgsl_mmu_map_global);
735
736int kgsl_mmu_stop(struct kgsl_device *device)
737{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700738 struct kgsl_mmu *mmu = &device->mmu;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600739
740 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
741 return 0;
742 else
743 return mmu->mmu_ops->mmu_stop(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700744}
745EXPORT_SYMBOL(kgsl_mmu_stop);
746
747int kgsl_mmu_close(struct kgsl_device *device)
748{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700749 struct kgsl_mmu *mmu = &device->mmu;
750
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600751 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
752 return 0;
753 else
754 return mmu->mmu_ops->mmu_close(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700755}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600756EXPORT_SYMBOL(kgsl_mmu_close);
757
758int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
759 enum kgsl_deviceid id)
760{
761 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
762 return pt->pt_ops->mmu_pt_get_flags(pt, id);
763 else
764 return 0;
765}
766EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
767
768void kgsl_mmu_ptpool_destroy(void *ptpool)
769{
770 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
771 kgsl_gpummu_ptpool_destroy(ptpool);
772 ptpool = 0;
773}
774EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
775
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600776void *kgsl_mmu_ptpool_init(int entries)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600777{
778 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600779 return kgsl_gpummu_ptpool_init(entries);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600780 else
781 return (void *)(-1);
782}
783EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
784
785int kgsl_mmu_enabled(void)
786{
787 if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
788 return 1;
789 else
790 return 0;
791}
792EXPORT_SYMBOL(kgsl_mmu_enabled);
793
794int kgsl_mmu_pt_equal(struct kgsl_pagetable *pt,
795 unsigned int pt_base)
796{
797 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
798 return true;
799 else
800 return pt->pt_ops->mmu_pt_equal(pt, pt_base);
801}
802EXPORT_SYMBOL(kgsl_mmu_pt_equal);
803
/* Return the globally selected MMU type (set by kgsl_mmu_set_mmutype). */
enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
{
	return kgsl_mmu_type;
}
EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
809
/*
 * kgsl_mmu_set_mmutype - select the global MMU type from the detected
 * hardware, then apply an optional override string
 * ("gpummu"/"iommu"/"nommu"); later checks take priority.
 */
void kgsl_mmu_set_mmutype(char *mmutype)
{
	/* Set the default MMU - GPU on <=8960 and nothing on >= 8064 */
	kgsl_mmu_type =
		cpu_is_apq8064() ? KGSL_MMU_TYPE_NONE : KGSL_MMU_TYPE_GPU;

	/* Use the IOMMU if it is found */
	if (iommu_found())
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;

	/* Explicit override; "iommu" is honored only when one exists */
	if (mmutype && !strncmp(mmutype, "gpummu", 6))
		kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
	if (iommu_found() && mmutype && !strncmp(mmutype, "iommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
	if (mmutype && !strncmp(mmutype, "nommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
}
EXPORT_SYMBOL(kgsl_mmu_set_mmutype);