blob: 7eec9e5ddcac05ab87affc3d16143dc90782bc23 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/types.h>
14#include <linux/device.h>
15#include <linux/spinlock.h>
16#include <linux/genalloc.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060019#include <linux/iommu.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020
21#include "kgsl.h"
22#include "kgsl_mmu.h"
23#include "kgsl_device.h"
24#include "kgsl_sharedmem.h"
25
26#define KGSL_MMU_ALIGN_SHIFT 13
27#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
28
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060029static enum kgsl_mmutype kgsl_mmu_type;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030
31static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070033static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
34{
35 int i;
36 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
37 struct kgsl_device *device = kgsl_driver.devp[i];
38 if (device)
39 device->ftbl->cleanup_pt(device, pt);
40 }
41 return 0;
42}
43
/*
 * kref release callback for a pagetable: unlink it from the global
 * pagetable list, remove its sysfs nodes, run per-device cleanup,
 * destroy its GPU VA pool and backend-specific state, then free it.
 * Called only when the last reference is dropped (see kgsl_put_pagetable).
 */
static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	/* Unlink from kgsl_driver.pagetable_list under the list lock */
	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	/* Backend-specific teardown (gpummu or iommu implementation) */
	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);

	kfree(pagetable);
}
65
66static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
67{
68 if (pagetable)
69 kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
70}
71
72static struct kgsl_pagetable *
73kgsl_get_pagetable(unsigned long name)
74{
75 struct kgsl_pagetable *pt, *ret = NULL;
76 unsigned long flags;
77
78 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
79 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
80 if (pt->name == name) {
81 ret = pt;
82 kref_get(&ret->refcount);
83 break;
84 }
85 }
86
87 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
88 return ret;
89}
90
91static struct kgsl_pagetable *
92_get_pt_from_kobj(struct kobject *kobj)
93{
94 unsigned long ptname;
95
96 if (!kobj)
97 return NULL;
98
99 if (sscanf(kobj->name, "%ld", &ptname) != 1)
100 return NULL;
101
102 return kgsl_get_pagetable(ptname);
103}
104
105static ssize_t
106sysfs_show_entries(struct kobject *kobj,
107 struct kobj_attribute *attr,
108 char *buf)
109{
110 struct kgsl_pagetable *pt;
111 int ret = 0;
112
113 pt = _get_pt_from_kobj(kobj);
114
115 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600116 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700117
118 kgsl_put_pagetable(pt);
119 return ret;
120}
121
122static ssize_t
123sysfs_show_mapped(struct kobject *kobj,
124 struct kobj_attribute *attr,
125 char *buf)
126{
127 struct kgsl_pagetable *pt;
128 int ret = 0;
129
130 pt = _get_pt_from_kobj(kobj);
131
132 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600133 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700134
135 kgsl_put_pagetable(pt);
136 return ret;
137}
138
139static ssize_t
140sysfs_show_va_range(struct kobject *kobj,
141 struct kobj_attribute *attr,
142 char *buf)
143{
144 struct kgsl_pagetable *pt;
145 int ret = 0;
146
147 pt = _get_pt_from_kobj(kobj);
148
149 if (pt)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600150 ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
151 CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700152
153 kgsl_put_pagetable(pt);
154 return ret;
155}
156
157static ssize_t
158sysfs_show_max_mapped(struct kobject *kobj,
159 struct kobj_attribute *attr,
160 char *buf)
161{
162 struct kgsl_pagetable *pt;
163 int ret = 0;
164
165 pt = _get_pt_from_kobj(kobj);
166
167 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600168 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700169
170 kgsl_put_pagetable(pt);
171 return ret;
172}
173
174static ssize_t
175sysfs_show_max_entries(struct kobject *kobj,
176 struct kobj_attribute *attr,
177 char *buf)
178{
179 struct kgsl_pagetable *pt;
180 int ret = 0;
181
182 pt = _get_pt_from_kobj(kobj);
183
184 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600185 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700186
187 kgsl_put_pagetable(pt);
188 return ret;
189}
190
/*
 * Read-only sysfs attributes exposing per-pagetable statistics; each
 * is backed by the matching sysfs_show_* handler above. They are
 * registered as a group under /sys/.../<ptname>/ by
 * pagetable_add_sysfs_objects().
 */
static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

/* NULL-terminated list consumed by sysfs_create_group() */
static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};
233
234static void
235pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
236{
237 if (pagetable->kobj)
238 sysfs_remove_group(pagetable->kobj,
239 &pagetable_attr_group);
240
241 kobject_put(pagetable->kobj);
242}
243
244static int
245pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
246{
247 char ptname[16];
248 int ret = -ENOMEM;
249
250 snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
251 pagetable->kobj = kobject_create_and_add(ptname,
252 kgsl_driver.ptkobj);
253 if (pagetable->kobj == NULL)
254 goto err;
255
256 ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);
257
258err:
259 if (ret) {
260 if (pagetable->kobj)
261 kobject_put(pagetable->kobj);
262
263 pagetable->kobj = NULL;
264 }
265
266 return ret;
267}
268
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600269unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700270{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600271 struct kgsl_mmu *mmu = &device->mmu;
272 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
273 return 0;
274 else
275 return mmu->mmu_ops->mmu_get_current_ptbase(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700276}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600277EXPORT_SYMBOL(kgsl_mmu_get_current_ptbase);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700278
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600279int
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600280kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base)
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600281{
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600282 struct kgsl_pagetable *pt;
283 int ptid = -1;
284
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600285 spin_lock(&kgsl_driver.ptlock);
286 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600287 if (pt->pt_ops->mmu_pt_equal(pt, pt_base)) {
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600288 ptid = (int) pt->name;
289 break;
290 }
291 }
292 spin_unlock(&kgsl_driver.ptlock);
293
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600294 return ptid;
295}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600296EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600297
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600298void kgsl_mmu_setstate(struct kgsl_device *device,
299 struct kgsl_pagetable *pagetable)
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600300{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600301 struct kgsl_mmu *mmu = &device->mmu;
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600302
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600303 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
304 return;
305 else
306 mmu->mmu_ops->mmu_setstate(device,
307 pagetable);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600308}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600309EXPORT_SYMBOL(kgsl_mmu_setstate);
310
311int kgsl_mmu_init(struct kgsl_device *device)
312{
313 struct kgsl_mmu *mmu = &device->mmu;
314
315 mmu->device = device;
316
317 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
318 dev_info(device->dev, "|%s| MMU type set for device is "
319 "NOMMU\n", __func__);
320 return 0;
321 } else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
322 mmu->mmu_ops = &gpummu_ops;
323 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
324 mmu->mmu_ops = &iommu_ops;
325
326 return mmu->mmu_ops->mmu_init(device);
327}
328EXPORT_SYMBOL(kgsl_mmu_init);
329
330int kgsl_mmu_start(struct kgsl_device *device)
331{
332 struct kgsl_mmu *mmu = &device->mmu;
333
334 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
335 kgsl_regwrite(device, MH_MMU_CONFIG, 0);
336 return 0;
337 } else {
338 return mmu->mmu_ops->mmu_start(device);
339 }
340}
341EXPORT_SYMBOL(kgsl_mmu_start);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600342
/*
 * Memory hub (MH) interrupt handler: decode AXI read/write errors and
 * MMU page faults, log/dispatch them, then acknowledge only the
 * interrupt bits this driver owns.
 */
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int reg;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
	/* MH_AXI_ERROR carries detail for either AXI error interrupt */
	kgsl_regread(device, MH_AXI_ERROR, &reg);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		/* Fault details are read and reported by the MMU backend */
		device->mmu.mmu_ops->mmu_pagefault(device);

	/* Clear only the interrupt bits KGSL handles */
	status &= KGSL_MMU_INT_MASK;
	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);
362
363static int kgsl_setup_pt(struct kgsl_pagetable *pt)
364{
365 int i = 0;
366 int status = 0;
367
368 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
369 struct kgsl_device *device = kgsl_driver.devp[i];
370 if (device) {
371 status = device->ftbl->setup_pt(device, pt);
372 if (status)
373 goto error_pt;
374 }
375 }
376 return status;
377error_pt:
378 while (i >= 0) {
379 struct kgsl_device *device = kgsl_driver.devp[i];
380 if (device)
381 device->ftbl->cleanup_pt(device, pt);
382 i--;
383 }
384 return status;
385}
386
387static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
388 unsigned int name)
389{
390 int status = 0;
391 struct kgsl_pagetable *pagetable = NULL;
392 unsigned long flags;
393
394 pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
395 if (pagetable == NULL) {
396 KGSL_CORE_ERR("kzalloc(%d) failed\n",
397 sizeof(struct kgsl_pagetable));
398 return NULL;
399 }
400
401 kref_init(&pagetable->refcount);
402
403 spin_lock_init(&pagetable->lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700404 pagetable->name = name;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600405 pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(
406 CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700407
408 pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
409 if (pagetable->pool == NULL) {
410 KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600411 goto err_alloc;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700412 }
413
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600414 if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
415 CONFIG_MSM_KGSL_PAGE_TABLE_SIZE, -1)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700416 KGSL_CORE_ERR("gen_pool_add failed\n");
417 goto err_pool;
418 }
419
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600420 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
421 pagetable->pt_ops = &gpummu_pt_ops;
422 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
423 pagetable->pt_ops = &iommu_pt_ops;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700424
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600425 pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
426 if (!pagetable->priv)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700427 goto err_pool;
428
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700429 status = kgsl_setup_pt(pagetable);
430 if (status)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600431 goto err_mmu_create;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700432
433 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
434 list_add(&pagetable->list, &kgsl_driver.pagetable_list);
435 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
436
437 /* Create the sysfs entries */
438 pagetable_add_sysfs_objects(pagetable);
439
440 return pagetable;
441
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600442err_mmu_create:
443 pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700444err_pool:
445 gen_pool_destroy(pagetable->pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700446err_alloc:
447 kfree(pagetable);
448
449 return NULL;
450}
451
/*
 * Get (creating on first use) the pagetable named @name, taking a
 * reference the caller drops with kgsl_mmu_putpagetable(). With nommu
 * a dummy non-NULL token (-1) is returned so callers can still treat
 * the result as success. Unless per-process pagetables are enabled
 * (supported on the GPU MMU only), all requests are redirected to the
 * global pagetable.
 */
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return (void *)(-1);

#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
	/* The IOMMU path does not support per-process pagetables */
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		name = KGSL_MMU_GLOBAL_PT;
#else
	name = KGSL_MMU_GLOBAL_PT;
#endif
	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}
472
/*
 * Release a reference obtained from kgsl_mmu_getpagetable().
 * NULL-safe; the pagetable is destroyed when the last reference drops.
 */
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700478
479void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
480{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600481 struct kgsl_mmu *mmu = &device->mmu;
482 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
483 return;
484 else if (device->ftbl->setstate)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700485 device->ftbl->setstate(device, flags);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600486 else if (mmu->mmu_ops->mmu_device_setstate)
487 mmu->mmu_ops->mmu_device_setstate(device, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700488}
489EXPORT_SYMBOL(kgsl_setstate);
490
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600491void kgsl_mmu_device_setstate(struct kgsl_device *device, uint32_t flags)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700492{
493 struct kgsl_mmu *mmu = &device->mmu;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600494 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
495 return;
496 else if (mmu->mmu_ops->mmu_device_setstate)
497 mmu->mmu_ops->mmu_device_setstate(device, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700498}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600499EXPORT_SYMBOL(kgsl_mmu_device_setstate);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700500
/*
 * Program the memory hub (MH): disable the MMU, set the MPU-protected
 * physical range the GPU core may access, and apply arbiter/interface
 * configuration. Interrupt enabling is handled separately (see below).
 */
void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force mmu off to for now*/
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	/* wait for the write to take effect before further programming */
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
			mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	/* interface config registers are optional; 0 means "leave default" */
	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
				mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
				mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}
527
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700528unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
529{
530 unsigned int physaddr = 0;
531 pgd_t *pgd_ptr = NULL;
532 pmd_t *pmd_ptr = NULL;
533 pte_t *pte_ptr = NULL, pte;
534
535 pgd_ptr = pgd_offset(current->mm, (unsigned long) virtaddr);
536 if (pgd_none(*pgd) || pgd_bad(*pgd)) {
537 KGSL_CORE_ERR("Invalid pgd entry\n");
538 return 0;
539 }
540
541 pmd_ptr = pmd_offset(pgd_ptr, (unsigned long) virtaddr);
542 if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
543 KGSL_CORE_ERR("Invalid pmd entry\n");
544 return 0;
545 }
546
547 pte_ptr = pte_offset_map(pmd_ptr, (unsigned long) virtaddr);
548 if (!pte_ptr) {
549 KGSL_CORE_ERR("pt_offset_map failed\n");
550 return 0;
551 }
552 pte = *pte_ptr;
553 physaddr = pte_pfn(pte);
554 pte_unmap(pte_ptr);
555 physaddr <<= PAGE_SHIFT;
556 return physaddr;
557}
558
/*
 * Map @memdesc into @pagetable: carve a GPU virtual range out of the
 * pagetable's VA pool (aligned per KGSL_MMU_ALIGN_SHIFT), then have the
 * backend build the page table entries with @protflags. On success
 * memdesc->gpuaddr holds the mapped address; under nommu the physical
 * address is used directly. Returns 0 or a negative errno, with the VA
 * range released again on backend failure.
 */
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
				struct kgsl_memdesc *memdesc,
				unsigned int protflags)
{
	int ret;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		/* no translation: GPU sees physical addresses */
		memdesc->gpuaddr = memdesc->physaddr;
		return 0;
	}
	memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
		memdesc->size, KGSL_MMU_ALIGN_SHIFT);

	if (memdesc->gpuaddr == 0) {
		KGSL_CORE_ERR("gen_pool_alloc(%d) failed\n", memdesc->size);
		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
				pagetable->name, pagetable->stats.mapped,
				pagetable->stats.entries);
		return -ENOMEM;
	}

	/* pagetable->lock guards both the backend map and the stats */
	spin_lock(&pagetable->lock);
	ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags);

	if (ret)
		goto err_free_gpuaddr;

	/* Keep track of the statistics for the sysfs files */

	KGSL_STATS_ADD(1, pagetable->stats.entries,
		       pagetable->stats.max_entries);

	KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
		       pagetable->stats.max_mapped);

	spin_unlock(&pagetable->lock);

	return 0;

err_free_gpuaddr:
	spin_unlock(&pagetable->lock);
	gen_pool_free(pagetable->pool, memdesc->gpuaddr, memdesc->size);
	memdesc->gpuaddr = 0;
	return ret;
}
EXPORT_SYMBOL(kgsl_mmu_map);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700606
/*
 * Undo a kgsl_mmu_map(): tear down the backend mapping, update the
 * statistics, and return the GPU virtual range to the pool. A zero
 * size or gpuaddr means nothing was mapped. Always returns 0.
 */
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
		return 0;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		/* nommu: gpuaddr was just the physical address */
		memdesc->gpuaddr = 0;
		return 0;
	}
	spin_lock(&pagetable->lock);
	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= memdesc->size;

	spin_unlock(&pagetable->lock);

	/* free the range at its aligned base, matching the allocation */
	gen_pool_free(pagetable->pool,
			memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK,
			memdesc->size);

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
633
634int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
635 struct kgsl_memdesc *memdesc, unsigned int protflags)
636{
637 int result = -EINVAL;
638 unsigned int gpuaddr = 0;
639
640 if (memdesc == NULL) {
641 KGSL_CORE_ERR("invalid memdesc\n");
642 goto error;
643 }
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600644 /* Not all global mappings are needed for all MMU types */
645 if (!memdesc->size)
646 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700647
648 gpuaddr = memdesc->gpuaddr;
649
650 result = kgsl_mmu_map(pagetable, memdesc, protflags);
651 if (result)
652 goto error;
653
654 /*global mappings must have the same gpu address in all pagetables*/
655 if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
656 KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x"
657 "gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr,
658 gpuaddr, memdesc->gpuaddr);
659 goto error_unmap;
660 }
661 return result;
662error_unmap:
663 kgsl_mmu_unmap(pagetable, memdesc);
664error:
665 return result;
666}
667EXPORT_SYMBOL(kgsl_mmu_map_global);
668
669int kgsl_mmu_stop(struct kgsl_device *device)
670{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700671 struct kgsl_mmu *mmu = &device->mmu;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600672
673 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
674 return 0;
675 else
676 return mmu->mmu_ops->mmu_stop(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700677}
678EXPORT_SYMBOL(kgsl_mmu_stop);
679
680int kgsl_mmu_close(struct kgsl_device *device)
681{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700682 struct kgsl_mmu *mmu = &device->mmu;
683
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600684 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
685 return 0;
686 else
687 return mmu->mmu_ops->mmu_close(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700688}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600689EXPORT_SYMBOL(kgsl_mmu_close);
690
691int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
692 enum kgsl_deviceid id)
693{
694 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
695 return pt->pt_ops->mmu_pt_get_flags(pt, id);
696 else
697 return 0;
698}
699EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
700
701void kgsl_mmu_ptpool_destroy(void *ptpool)
702{
703 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
704 kgsl_gpummu_ptpool_destroy(ptpool);
705 ptpool = 0;
706}
707EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
708
709void *kgsl_mmu_ptpool_init(int ptsize, int entries)
710{
711 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
712 return kgsl_gpummu_ptpool_init(ptsize, entries);
713 else
714 return (void *)(-1);
715}
716EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
717
718int kgsl_mmu_enabled(void)
719{
720 if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
721 return 1;
722 else
723 return 0;
724}
725EXPORT_SYMBOL(kgsl_mmu_enabled);
726
727int kgsl_mmu_pt_equal(struct kgsl_pagetable *pt,
728 unsigned int pt_base)
729{
730 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
731 return true;
732 else
733 return pt->pt_ops->mmu_pt_equal(pt, pt_base);
734}
735EXPORT_SYMBOL(kgsl_mmu_pt_equal);
736
/* Report which MMU backend was selected by kgsl_mmu_set_mmutype(). */
enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
{
	return kgsl_mmu_type;
}
EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
742
/*
 * Select the MMU backend. The compile-time default is gpummu (or iommu
 * when CONFIG_MSM_KGSL_IOMMU is set and IOMMU hardware is present); a
 * "gpummu"/"iommu"/"nommu" string (e.g. from a module parameter) then
 * overrides it, with "nommu" checked last and so taking precedence.
 */
void kgsl_mmu_set_mmutype(char *mmutype)
{
	kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
#ifdef CONFIG_MSM_KGSL_GPUMMU
	kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
#elif defined(CONFIG_MSM_KGSL_IOMMU)
	if (iommu_found())
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
#endif
	if (mmutype && !strncmp(mmutype, "gpummu", 6))
		kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
	/* iommu can only be requested if the hardware is actually there */
	if (iommu_found() && mmutype && !strncmp(mmutype, "iommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
	if (mmutype && !strncmp(mmutype, "nommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
}
EXPORT_SYMBOL(kgsl_mmu_set_mmutype);
759EXPORT_SYMBOL(kgsl_mmu_set_mmutype);