blob: ceb321278cc5b3861dc7240a7a0186c1ae5de0e1 [file] [log] [blame]
Jordan Crouse00714012012-03-16 14:53:40 -06001/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/types.h>
14#include <linux/device.h>
15#include <linux/spinlock.h>
16#include <linux/genalloc.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060019#include <linux/iommu.h>
Jordan Crouse817e0b92012-02-04 10:23:53 -070020#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021
22#include "kgsl.h"
23#include "kgsl_mmu.h"
24#include "kgsl_device.h"
25#include "kgsl_sharedmem.h"
Jeremy Gebbena3d07a42011-10-17 12:08:16 -060026#include "adreno_postmortem.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027
28#define KGSL_MMU_ALIGN_SHIFT 13
29#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
30
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060031static enum kgsl_mmutype kgsl_mmu_type;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070032
33static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
34
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
36{
37 int i;
38 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
39 struct kgsl_device *device = kgsl_driver.devp[i];
40 if (device)
41 device->ftbl->cleanup_pt(device, pt);
42 }
43 return 0;
44}
45
/*
 * kref release callback: frees a pagetable once the last reference is
 * dropped (see kgsl_put_pagetable).  Teardown order matters here.
 */
static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	/* Unlink first, under ptlock, so kgsl_get_pagetable() can no
	 * longer find this entry and take a new reference. */
	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	/* Give each device a chance to release its per-pt state */
	kgsl_cleanup_pt(pagetable);

	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	/* Destroy the MMU-specific (gpummu/iommu) backing pagetable */
	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);

	kfree(pagetable);
}
67
68static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
69{
70 if (pagetable)
71 kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
72}
73
74static struct kgsl_pagetable *
75kgsl_get_pagetable(unsigned long name)
76{
77 struct kgsl_pagetable *pt, *ret = NULL;
78 unsigned long flags;
79
80 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
81 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
82 if (pt->name == name) {
83 ret = pt;
84 kref_get(&ret->refcount);
85 break;
86 }
87 }
88
89 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
90 return ret;
91}
92
93static struct kgsl_pagetable *
94_get_pt_from_kobj(struct kobject *kobj)
95{
96 unsigned long ptname;
97
98 if (!kobj)
99 return NULL;
100
101 if (sscanf(kobj->name, "%ld", &ptname) != 1)
102 return NULL;
103
104 return kgsl_get_pagetable(ptname);
105}
106
107static ssize_t
108sysfs_show_entries(struct kobject *kobj,
109 struct kobj_attribute *attr,
110 char *buf)
111{
112 struct kgsl_pagetable *pt;
113 int ret = 0;
114
115 pt = _get_pt_from_kobj(kobj);
116
117 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600118 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700119
120 kgsl_put_pagetable(pt);
121 return ret;
122}
123
124static ssize_t
125sysfs_show_mapped(struct kobject *kobj,
126 struct kobj_attribute *attr,
127 char *buf)
128{
129 struct kgsl_pagetable *pt;
130 int ret = 0;
131
132 pt = _get_pt_from_kobj(kobj);
133
134 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600135 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700136
137 kgsl_put_pagetable(pt);
138 return ret;
139}
140
141static ssize_t
142sysfs_show_va_range(struct kobject *kobj,
143 struct kobj_attribute *attr,
144 char *buf)
145{
146 struct kgsl_pagetable *pt;
147 int ret = 0;
148
149 pt = _get_pt_from_kobj(kobj);
150
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600151 if (pt) {
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600152 ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600153 kgsl_mmu_get_ptsize());
154 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700155
156 kgsl_put_pagetable(pt);
157 return ret;
158}
159
160static ssize_t
161sysfs_show_max_mapped(struct kobject *kobj,
162 struct kobj_attribute *attr,
163 char *buf)
164{
165 struct kgsl_pagetable *pt;
166 int ret = 0;
167
168 pt = _get_pt_from_kobj(kobj);
169
170 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600171 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700172
173 kgsl_put_pagetable(pt);
174 return ret;
175}
176
177static ssize_t
178sysfs_show_max_entries(struct kobject *kobj,
179 struct kobj_attribute *attr,
180 char *buf)
181{
182 struct kgsl_pagetable *pt;
183 int ret = 0;
184
185 pt = _get_pt_from_kobj(kobj);
186
187 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600188 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700189
190 kgsl_put_pagetable(pt);
191 return ret;
192}
193
/*
 * Read-only sysfs attributes published per pagetable (registered by
 * pagetable_add_sysfs_objects under kgsl_driver.ptkobj).  Each one is
 * backed by the matching sysfs_show_* handler above.
 */
static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

/* NULL-terminated list consumed by sysfs_create_group/remove_group */
static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};
236
237static void
238pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
239{
240 if (pagetable->kobj)
241 sysfs_remove_group(pagetable->kobj,
242 &pagetable_attr_group);
243
244 kobject_put(pagetable->kobj);
245}
246
247static int
248pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
249{
250 char ptname[16];
251 int ret = -ENOMEM;
252
253 snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
254 pagetable->kobj = kobject_create_and_add(ptname,
255 kgsl_driver.ptkobj);
256 if (pagetable->kobj == NULL)
257 goto err;
258
259 ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);
260
261err:
262 if (ret) {
263 if (pagetable->kobj)
264 kobject_put(pagetable->kobj);
265
266 pagetable->kobj = NULL;
267 }
268
269 return ret;
270}
271
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600272unsigned int kgsl_mmu_get_ptsize(void)
273{
274 /*
275 * For IOMMU, we could do up to 4G virtual range if we wanted to, but
276 * it makes more sense to return a smaller range and leave the rest of
277 * the virtual range for future improvements
278 */
279
280 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
281 return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
282 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
283 return SZ_2G;
284 else
285 return 0;
286}
287
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600288unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700289{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600290 struct kgsl_mmu *mmu = &device->mmu;
291 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
292 return 0;
293 else
294 return mmu->mmu_ops->mmu_get_current_ptbase(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700295}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600296EXPORT_SYMBOL(kgsl_mmu_get_current_ptbase);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700297
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600298int
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600299kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base)
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600300{
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600301 struct kgsl_pagetable *pt;
302 int ptid = -1;
303
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600304 spin_lock(&kgsl_driver.ptlock);
305 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600306 if (pt->pt_ops->mmu_pt_equal(pt, pt_base)) {
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600307 ptid = (int) pt->name;
308 break;
309 }
310 }
311 spin_unlock(&kgsl_driver.ptlock);
312
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600313 return ptid;
314}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600315EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600316
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600317void kgsl_mmu_setstate(struct kgsl_device *device,
318 struct kgsl_pagetable *pagetable)
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600319{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600320 struct kgsl_mmu *mmu = &device->mmu;
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600321
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600322 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
323 return;
324 else
325 mmu->mmu_ops->mmu_setstate(device,
326 pagetable);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600327}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600328EXPORT_SYMBOL(kgsl_mmu_setstate);
329
330int kgsl_mmu_init(struct kgsl_device *device)
331{
332 struct kgsl_mmu *mmu = &device->mmu;
333
334 mmu->device = device;
335
336 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
337 dev_info(device->dev, "|%s| MMU type set for device is "
338 "NOMMU\n", __func__);
339 return 0;
340 } else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
341 mmu->mmu_ops = &gpummu_ops;
342 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
343 mmu->mmu_ops = &iommu_ops;
344
345 return mmu->mmu_ops->mmu_init(device);
346}
347EXPORT_SYMBOL(kgsl_mmu_init);
348
349int kgsl_mmu_start(struct kgsl_device *device)
350{
351 struct kgsl_mmu *mmu = &device->mmu;
352
353 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
354 kgsl_regwrite(device, MH_MMU_CONFIG, 0);
355 return 0;
356 } else {
357 return mmu->mmu_ops->mmu_start(device);
358 }
359}
360EXPORT_SYMBOL(kgsl_mmu_start);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600361
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700362void kgsl_mh_intrcallback(struct kgsl_device *device)
363{
364 unsigned int status = 0;
365 unsigned int reg;
366
367 kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
368 kgsl_regread(device, MH_AXI_ERROR, &reg);
369
370 if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
371 KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600372 if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373 KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600374 if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600375 device->mmu.mmu_ops->mmu_pagefault(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700376
Jordan Crousec8c9fcd2011-07-28 08:37:58 -0600377 status &= KGSL_MMU_INT_MASK;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700378 kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700379}
380EXPORT_SYMBOL(kgsl_mh_intrcallback);
381
/*
 * Run each registered device's setup_pt hook for @pt; on failure,
 * unwind by calling cleanup_pt on every device visited so far.
 */
static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	return status;
error_pt:
	/* Unwind in reverse order.  NOTE(review): the loop starts at the
	 * index whose setup_pt just failed, so cleanup_pt is also called
	 * for that device — presumably cleanup is safe after a failed
	 * setup; confirm against the per-device implementations. */
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}
405
406static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
407 unsigned int name)
408{
409 int status = 0;
410 struct kgsl_pagetable *pagetable = NULL;
411 unsigned long flags;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600412 unsigned int ptsize;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700413
414 pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
415 if (pagetable == NULL) {
416 KGSL_CORE_ERR("kzalloc(%d) failed\n",
417 sizeof(struct kgsl_pagetable));
418 return NULL;
419 }
420
421 kref_init(&pagetable->refcount);
422
423 spin_lock_init(&pagetable->lock);
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600424
425 ptsize = kgsl_mmu_get_ptsize();
426
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700427 pagetable->name = name;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600428 pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700429
430 pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
431 if (pagetable->pool == NULL) {
432 KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600433 goto err_alloc;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700434 }
435
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600436 if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600437 ptsize, -1)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700438 KGSL_CORE_ERR("gen_pool_add failed\n");
439 goto err_pool;
440 }
441
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600442 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
443 pagetable->pt_ops = &gpummu_pt_ops;
444 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
445 pagetable->pt_ops = &iommu_pt_ops;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700446
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600447 pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
448 if (!pagetable->priv)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700449 goto err_pool;
450
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700451 status = kgsl_setup_pt(pagetable);
452 if (status)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600453 goto err_mmu_create;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700454
455 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
456 list_add(&pagetable->list, &kgsl_driver.pagetable_list);
457 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
458
459 /* Create the sysfs entries */
460 pagetable_add_sysfs_objects(pagetable);
461
462 return pagetable;
463
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600464err_mmu_create:
465 pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700466err_pool:
467 gen_pool_destroy(pagetable->pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700468err_alloc:
469 kfree(pagetable);
470
471 return NULL;
472}
473
474struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
475{
476 struct kgsl_pagetable *pt;
477
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600478 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
479 return (void *)(-1);
480
481#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
482 if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
483 name = KGSL_MMU_GLOBAL_PT;
484#else
485 name = KGSL_MMU_GLOBAL_PT;
486#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700487 pt = kgsl_get_pagetable(name);
488
489 if (pt == NULL)
490 pt = kgsl_mmu_createpagetableobject(name);
491
492 return pt;
493}
494
/* Public wrapper: release a reference taken by kgsl_mmu_getpagetable(). */
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700500
501void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
502{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600503 struct kgsl_mmu *mmu = &device->mmu;
504 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
505 return;
506 else if (device->ftbl->setstate)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700507 device->ftbl->setstate(device, flags);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600508 else if (mmu->mmu_ops->mmu_device_setstate)
509 mmu->mmu_ops->mmu_device_setstate(device, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700510}
511EXPORT_SYMBOL(kgsl_setstate);
512
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600513void kgsl_mmu_device_setstate(struct kgsl_device *device, uint32_t flags)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700514{
515 struct kgsl_mmu *mmu = &device->mmu;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600516 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
517 return;
518 else if (mmu->mmu_ops->mmu_device_setstate)
519 mmu->mmu_ops->mmu_device_setstate(device, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700520}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600521EXPORT_SYMBOL(kgsl_mmu_device_setstate);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700522
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600523void kgsl_mh_start(struct kgsl_device *device)
524{
525 struct kgsl_mh *mh = &device->mh;
526 /* force mmu off to for now*/
527 kgsl_regwrite(device, MH_MMU_CONFIG, 0);
528 kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
529
530 /* define physical memory range accessible by the core */
531 kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
532 kgsl_regwrite(device, MH_MMU_MPU_END,
533 mh->mpu_base + mh->mpu_range);
534 kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);
535
536 if (mh->mh_intf_cfg1 != 0)
537 kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
538 mh->mh_intf_cfg1);
539
540 if (mh->mh_intf_cfg2 != 0)
541 kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
542 mh->mh_intf_cfg2);
543
544 /*
545 * Interrupts are enabled on a per-device level when
546 * kgsl_pwrctrl_irq() is called
547 */
548}
549
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700550int
551kgsl_mmu_map(struct kgsl_pagetable *pagetable,
552 struct kgsl_memdesc *memdesc,
553 unsigned int protflags)
554{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600555 int ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700556
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600557 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
Jordan Crouse40861a42012-02-06 10:18:23 -0700558 if (memdesc->sglen == 1) {
559 memdesc->gpuaddr = sg_phys(memdesc->sg);
560 return 0;
561 } else {
562 KGSL_CORE_ERR("Memory is not contigious "
563 "(sglen = %d)\n", memdesc->sglen);
564 return -EINVAL;
565 }
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600566 }
Jordan Crouse40861a42012-02-06 10:18:23 -0700567
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700568 memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
569 memdesc->size, KGSL_MMU_ALIGN_SHIFT);
570
571 if (memdesc->gpuaddr == 0) {
572 KGSL_CORE_ERR("gen_pool_alloc(%d) failed\n", memdesc->size);
573 KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
574 pagetable->name, pagetable->stats.mapped,
575 pagetable->stats.entries);
576 return -ENOMEM;
577 }
578
Shubhraprakash Dasbadaeda2012-03-21 00:31:39 -0600579 if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
580 spin_lock(&pagetable->lock);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600581 ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags);
Shubhraprakash Dasbadaeda2012-03-21 00:31:39 -0600582 if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
583 spin_lock(&pagetable->lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700584
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600585 if (ret)
586 goto err_free_gpuaddr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700587
588 /* Keep track of the statistics for the sysfs files */
589
590 KGSL_STATS_ADD(1, pagetable->stats.entries,
591 pagetable->stats.max_entries);
592
593 KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
594 pagetable->stats.max_mapped);
595
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700596 spin_unlock(&pagetable->lock);
597
598 return 0;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600599
600err_free_gpuaddr:
601 spin_unlock(&pagetable->lock);
602 gen_pool_free(pagetable->pool, memdesc->gpuaddr, memdesc->size);
603 memdesc->gpuaddr = 0;
604 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700605}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600606EXPORT_SYMBOL(kgsl_mmu_map);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700607
/*
 * Unmap @memdesc from @pagetable, return its virtual range to the pool
 * and update the sysfs statistics.  Always returns 0.
 */
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	/* Nothing to do for an empty or never-mapped descriptor */
	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
		return 0;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		memdesc->gpuaddr = 0;
		return 0;
	}
	/* NOTE(review): as in kgsl_mmu_map, the spinlock is taken before
	 * mmu_unmap for non-IOMMU types but only after it for the IOMMU —
	 * presumably because the IOMMU unmap may sleep.  The lock is held
	 * for the stats update either way. */
	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= memdesc->size;

	spin_unlock(&pagetable->lock);

	/* Mask off the low bits the allocator may have stored alignment in */
	gen_pool_free(pagetable->pool,
			memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK,
			memdesc->size);

	/*
	 * Don't clear the gpuaddr on global mappings because they
	 * may be in use by other pagetables
	 */
	if (!(memdesc->priv & KGSL_MEMFLAGS_GLOBAL))
		memdesc->gpuaddr = 0;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
643
644int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
645 struct kgsl_memdesc *memdesc, unsigned int protflags)
646{
647 int result = -EINVAL;
648 unsigned int gpuaddr = 0;
649
650 if (memdesc == NULL) {
651 KGSL_CORE_ERR("invalid memdesc\n");
652 goto error;
653 }
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600654 /* Not all global mappings are needed for all MMU types */
655 if (!memdesc->size)
656 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700657
658 gpuaddr = memdesc->gpuaddr;
659
660 result = kgsl_mmu_map(pagetable, memdesc, protflags);
661 if (result)
662 goto error;
663
664 /*global mappings must have the same gpu address in all pagetables*/
665 if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
666 KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x"
667 "gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr,
668 gpuaddr, memdesc->gpuaddr);
669 goto error_unmap;
670 }
Jeremy Gebben7faf9ec2012-03-21 14:09:55 -0600671 memdesc->priv |= KGSL_MEMFLAGS_GLOBAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700672 return result;
673error_unmap:
674 kgsl_mmu_unmap(pagetable, memdesc);
675error:
676 return result;
677}
678EXPORT_SYMBOL(kgsl_mmu_map_global);
679
680int kgsl_mmu_stop(struct kgsl_device *device)
681{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700682 struct kgsl_mmu *mmu = &device->mmu;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600683
684 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
685 return 0;
686 else
687 return mmu->mmu_ops->mmu_stop(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700688}
689EXPORT_SYMBOL(kgsl_mmu_stop);
690
691int kgsl_mmu_close(struct kgsl_device *device)
692{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700693 struct kgsl_mmu *mmu = &device->mmu;
694
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600695 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
696 return 0;
697 else
698 return mmu->mmu_ops->mmu_close(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700699}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600700EXPORT_SYMBOL(kgsl_mmu_close);
701
702int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
703 enum kgsl_deviceid id)
704{
705 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
706 return pt->pt_ops->mmu_pt_get_flags(pt, id);
707 else
708 return 0;
709}
710EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
711
712void kgsl_mmu_ptpool_destroy(void *ptpool)
713{
714 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
715 kgsl_gpummu_ptpool_destroy(ptpool);
716 ptpool = 0;
717}
718EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
719
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600720void *kgsl_mmu_ptpool_init(int entries)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600721{
722 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600723 return kgsl_gpummu_ptpool_init(entries);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600724 else
725 return (void *)(-1);
726}
727EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
728
729int kgsl_mmu_enabled(void)
730{
731 if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
732 return 1;
733 else
734 return 0;
735}
736EXPORT_SYMBOL(kgsl_mmu_enabled);
737
738int kgsl_mmu_pt_equal(struct kgsl_pagetable *pt,
739 unsigned int pt_base)
740{
741 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
742 return true;
743 else
744 return pt->pt_ops->mmu_pt_equal(pt, pt_base);
745}
746EXPORT_SYMBOL(kgsl_mmu_pt_equal);
747
748enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
749{
750 return kgsl_mmu_type;
751}
752EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
753
754void kgsl_mmu_set_mmutype(char *mmutype)
755{
Jordan Crouse817e0b92012-02-04 10:23:53 -0700756 /* Set the default MMU - GPU on <=8960 and nothing on >= 8064 */
757 kgsl_mmu_type =
758 cpu_is_apq8064() ? KGSL_MMU_TYPE_NONE : KGSL_MMU_TYPE_GPU;
759
760 /* Use the IOMMU if it is found */
761 if (iommu_found())
762 kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
763
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600764 if (mmutype && !strncmp(mmutype, "gpummu", 6))
765 kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
766 if (iommu_found() && mmutype && !strncmp(mmutype, "iommu", 5))
767 kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
768 if (mmutype && !strncmp(mmutype, "nommu", 5))
769 kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
770}
771EXPORT_SYMBOL(kgsl_mmu_set_mmutype);