blob: 715b9d63d8dce9f82150c94acf6d02dfee986adc [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/types.h>
14#include <linux/device.h>
15#include <linux/spinlock.h>
16#include <linux/genalloc.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060019#include <linux/iommu.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020
21#include "kgsl.h"
22#include "kgsl_mmu.h"
23#include "kgsl_device.h"
24#include "kgsl_sharedmem.h"
25
/* GPU virtual allocations are aligned to 1 << 13 = 8 KB boundaries */
#define KGSL_MMU_ALIGN_SHIFT 13
/* Mask that clears the low KGSL_MMU_ALIGN_SHIFT bits of a GPU address */
#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))

/* MMU flavor in use (gpummu/iommu/nommu); set by kgsl_mmu_set_mmutype() */
static enum kgsl_mmutype kgsl_mmu_type;

static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
32
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070033static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
34{
35 int i;
36 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
37 struct kgsl_device *device = kgsl_driver.devp[i];
38 if (device)
39 device->ftbl->cleanup_pt(device, pt);
40 }
41 return 0;
42}
43
/*
 * kref release callback: tears down a pagetable once the last reference
 * is dropped.  Unlinks it from the global list first (under ptlock) so
 * no new lookups can find it, then removes sysfs nodes, detaches it
 * from all devices, frees the VA pool, and destroys the MMU-specific
 * backing before freeing the object itself.
 */
static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	/* pool may be NULL if creation failed partway through */
	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);

	kfree(pagetable);
}
65
66static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
67{
68 if (pagetable)
69 kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
70}
71
/*
 * Look up a pagetable by name on the global list and return it with a
 * reference held, or NULL if not found.  The caller releases the
 * reference with kgsl_put_pagetable().
 */
static struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt, *ret = NULL;
	unsigned long flags;

	/* ptlock protects kgsl_driver.pagetable_list */
	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->name == name) {
			ret = pt;
			/* take the ref under the lock so the entry cannot
			 * be destroyed between lookup and kref_get */
			kref_get(&ret->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
	return ret;
}
90
91static struct kgsl_pagetable *
92_get_pt_from_kobj(struct kobject *kobj)
93{
94 unsigned long ptname;
95
96 if (!kobj)
97 return NULL;
98
99 if (sscanf(kobj->name, "%ld", &ptname) != 1)
100 return NULL;
101
102 return kgsl_get_pagetable(ptname);
103}
104
105static ssize_t
106sysfs_show_entries(struct kobject *kobj,
107 struct kobj_attribute *attr,
108 char *buf)
109{
110 struct kgsl_pagetable *pt;
111 int ret = 0;
112
113 pt = _get_pt_from_kobj(kobj);
114
115 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600116 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700117
118 kgsl_put_pagetable(pt);
119 return ret;
120}
121
122static ssize_t
123sysfs_show_mapped(struct kobject *kobj,
124 struct kobj_attribute *attr,
125 char *buf)
126{
127 struct kgsl_pagetable *pt;
128 int ret = 0;
129
130 pt = _get_pt_from_kobj(kobj);
131
132 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600133 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700134
135 kgsl_put_pagetable(pt);
136 return ret;
137}
138
139static ssize_t
140sysfs_show_va_range(struct kobject *kobj,
141 struct kobj_attribute *attr,
142 char *buf)
143{
144 struct kgsl_pagetable *pt;
145 int ret = 0;
146
147 pt = _get_pt_from_kobj(kobj);
148
149 if (pt)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600150 ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
151 CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700152
153 kgsl_put_pagetable(pt);
154 return ret;
155}
156
157static ssize_t
158sysfs_show_max_mapped(struct kobject *kobj,
159 struct kobj_attribute *attr,
160 char *buf)
161{
162 struct kgsl_pagetable *pt;
163 int ret = 0;
164
165 pt = _get_pt_from_kobj(kobj);
166
167 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600168 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700169
170 kgsl_put_pagetable(pt);
171 return ret;
172}
173
174static ssize_t
175sysfs_show_max_entries(struct kobject *kobj,
176 struct kobj_attribute *attr,
177 char *buf)
178{
179 struct kgsl_pagetable *pt;
180 int ret = 0;
181
182 pt = _get_pt_from_kobj(kobj);
183
184 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600185 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700186
187 kgsl_put_pagetable(pt);
188 return ret;
189}
190
/*
 * Read-only sysfs attributes exposed under each pagetable's kobject
 * directory; each one is backed by the matching sysfs_show_* handler.
 */
static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

/* NULL-terminated list consumed by sysfs_create_group() */
static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};
233
234static void
235pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
236{
237 if (pagetable->kobj)
238 sysfs_remove_group(pagetable->kobj,
239 &pagetable_attr_group);
240
241 kobject_put(pagetable->kobj);
242}
243
/*
 * Create a sysfs directory named after the pagetable under
 * kgsl_driver.ptkobj and populate it with the stats attribute group.
 * On any failure the kobject is released and pagetable->kobj is left
 * NULL so the remove path stays safe.  Returns 0 or a negative errno.
 */
static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
		kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	/* ret is -ENOMEM here when kobject creation failed, or the
	 * sysfs_create_group() result otherwise */
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}

	return ret;
}
268
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600269unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700270{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600271 struct kgsl_mmu *mmu = &device->mmu;
272 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
273 return 0;
274 else
275 return mmu->mmu_ops->mmu_get_current_ptbase(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700276}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600277EXPORT_SYMBOL(kgsl_mmu_get_current_ptbase);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700278
/*
 * Translate a hardware pagetable base address into the name of the
 * pagetable it belongs to, by scanning the global pagetable list.
 * Returns the matching name, or -1 if no pagetable matches.
 */
int
kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base)
{
	struct kgsl_pagetable *pt;
	int ptid = -1;

	/* ptlock protects kgsl_driver.pagetable_list */
	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->pt_ops->mmu_pt_equal(pt, pt_base)) {
			ptid = (int) pt->name;
			break;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ptid;
}
EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600297
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600298void kgsl_mmu_setstate(struct kgsl_device *device,
299 struct kgsl_pagetable *pagetable)
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600300{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600301 struct kgsl_mmu *mmu = &device->mmu;
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600302
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600303 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
304 return;
305 else
306 mmu->mmu_ops->mmu_setstate(device,
307 pagetable);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600308}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600309EXPORT_SYMBOL(kgsl_mmu_setstate);
310
311int kgsl_mmu_init(struct kgsl_device *device)
312{
313 struct kgsl_mmu *mmu = &device->mmu;
314
315 mmu->device = device;
316
317 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
318 dev_info(device->dev, "|%s| MMU type set for device is "
319 "NOMMU\n", __func__);
320 return 0;
321 } else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
322 mmu->mmu_ops = &gpummu_ops;
323 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
324 mmu->mmu_ops = &iommu_ops;
325
326 return mmu->mmu_ops->mmu_init(device);
327}
328EXPORT_SYMBOL(kgsl_mmu_init);
329
330int kgsl_mmu_start(struct kgsl_device *device)
331{
332 struct kgsl_mmu *mmu = &device->mmu;
333
334 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
335 kgsl_regwrite(device, MH_MMU_CONFIG, 0);
336 return 0;
337 } else {
338 return mmu->mmu_ops->mmu_start(device);
339 }
340}
341EXPORT_SYMBOL(kgsl_mmu_start);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600342
/*
 * Memory-hub (MH) interrupt handler: log AXI read/write errors,
 * dispatch MMU page faults to the active mmu_ops handler, and then
 * acknowledge the serviced interrupt bits.
 */
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int reg;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
	/* MH_AXI_ERROR is read once and reported for both error kinds */
	kgsl_regread(device, MH_AXI_ERROR, &reg);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		device->mmu.mmu_ops->mmu_pagefault(device);

	/* only clear the interrupt bits this handler knows about */
	status &= KGSL_MMU_INT_MASK;
	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);
362
/*
 * Ask every registered device to set up its per-pagetable state.
 * On failure, unwind by calling cleanup_pt() on the devices already
 * visited.  Returns 0 on success or the failing setup_pt() status.
 */
static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	return status;
error_pt:
	/* NOTE(review): the unwind starts at index i, so cleanup_pt() is
	 * also called on the device whose setup_pt() just failed --
	 * presumably setup_pt can leave partial state; confirm against
	 * the device ftbl implementations. */
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}
386
/*
 * Allocate and fully initialize a new pagetable object: refcount,
 * spinlock, GPU VA allocation pool, MMU-specific backing store, and
 * per-device setup; then publish it on the global list and create its
 * sysfs nodes.  Returns the new pagetable, or NULL on any failure
 * (with all partially-created resources unwound via the goto chain).
 */
static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
				unsigned int name)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	unsigned long flags;

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			sizeof(struct kgsl_pagetable));
		return NULL;
	}

	kref_init(&pagetable->refcount);

	spin_lock_init(&pagetable->lock);
	pagetable->name = name;
	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(
		CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);

	/* pool hands out page-aligned GPU virtual addresses */
	pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
		goto err_alloc;
	}

	if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
		CONFIG_MSM_KGSL_PAGE_TABLE_SIZE, -1)) {
		KGSL_CORE_ERR("gen_pool_add failed\n");
		goto err_pool;
	}

	/* pick the pagetable ops matching the boot-selected MMU type */
	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		pagetable->pt_ops = &gpummu_pt_ops;
	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		pagetable->pt_ops = &iommu_pt_ops;

	pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
	if (!pagetable->priv)
		goto err_pool;

	status = kgsl_setup_pt(pagetable);
	if (status)
		goto err_mmu_create;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	/* Create the sysfs entries */
	pagetable_add_sysfs_objects(pagetable);

	return pagetable;

err_mmu_create:
	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
err_pool:
	gen_pool_destroy(pagetable->pool);
err_alloc:
	kfree(pagetable);

	return NULL;
}
451
/*
 * Find or create the pagetable identified by 'name' and return it with
 * a reference held (release with kgsl_mmu_putpagetable()).  In nommu
 * mode a dummy non-NULL token is returned instead of a real pagetable.
 */
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return (void *)(-1);

#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
	/* per-process pagetables: the IOMMU path still uses the single
	 * global pagetable; only the GPU MMU honors per-process names */
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		name = KGSL_MMU_GLOBAL_PT;
#else
	/* per-process pagetables disabled: everyone shares the global PT */
	name = KGSL_MMU_GLOBAL_PT;
#endif
	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}
472
/*
 * Release a reference obtained from kgsl_mmu_getpagetable(); the
 * pagetable is destroyed when the last reference drops.
 * NOTE(review): in nommu mode getpagetable returns (void *)(-1), which
 * is non-NULL and would reach kref_put() here -- presumably callers
 * never put the nommu token; confirm at the call sites.
 */
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700478
479void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
480{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600481 struct kgsl_mmu *mmu = &device->mmu;
482 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
483 return;
484 else if (device->ftbl->setstate)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700485 device->ftbl->setstate(device, flags);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600486 else if (mmu->mmu_ops->mmu_device_setstate)
487 mmu->mmu_ops->mmu_device_setstate(device, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700488}
489EXPORT_SYMBOL(kgsl_setstate);
490
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600491void kgsl_mmu_device_setstate(struct kgsl_device *device, uint32_t flags)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700492{
493 struct kgsl_mmu *mmu = &device->mmu;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600494 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
495 return;
496 else if (mmu->mmu_ops->mmu_device_setstate)
497 mmu->mmu_ops->mmu_device_setstate(device, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700498}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600499EXPORT_SYMBOL(kgsl_mmu_device_setstate);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700500
/*
 * Program the memory hub: force the hardware MMU off, wait for idle,
 * set the MPU-protected physical window, and load the board-specific
 * arbiter/interface configuration.  Interrupt enabling is deliberately
 * left to kgsl_pwrctrl_irq().
 */
void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force mmu off to for now*/
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
			mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	/* interface config registers are only written when the board
	 * supplies non-zero values */
	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
				mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
				mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}
527
/*
 * Map a memdesc into the pagetable: allocate an 8KB-aligned GPU VA
 * range from the pool, install the mapping via pt_ops, and update the
 * sysfs statistics.  In nommu mode the physical address is used
 * directly.  On pt_ops failure the VA range is returned to the pool
 * and memdesc->gpuaddr is reset.  Returns 0 or a negative errno.
 */
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
				struct kgsl_memdesc *memdesc,
				unsigned int protflags)
{
	int ret;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		/* no MMU: GPU address == physical address */
		memdesc->gpuaddr = memdesc->physaddr;
		return 0;
	}
	memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
		memdesc->size, KGSL_MMU_ALIGN_SHIFT);

	if (memdesc->gpuaddr == 0) {
		KGSL_CORE_ERR("gen_pool_alloc(%d) failed\n", memdesc->size);
		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
				pagetable->name, pagetable->stats.mapped,
				pagetable->stats.entries);
		return -ENOMEM;
	}

	/* pagetable->lock serializes the mapping and the stats update */
	spin_lock(&pagetable->lock);
	ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags);

	if (ret)
		goto err_free_gpuaddr;

	/* Keep track of the statistics for the sysfs files */

	KGSL_STATS_ADD(1, pagetable->stats.entries,
		       pagetable->stats.max_entries);

	KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
		       pagetable->stats.max_mapped);

	spin_unlock(&pagetable->lock);

	return 0;

err_free_gpuaddr:
	/* drop the lock before touching the pool, then roll back the VA */
	spin_unlock(&pagetable->lock);
	gen_pool_free(pagetable->pool, memdesc->gpuaddr, memdesc->size);
	memdesc->gpuaddr = 0;
	return ret;
}
EXPORT_SYMBOL(kgsl_mmu_map);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700575
/*
 * Remove a memdesc's mapping from the pagetable, update the stats, and
 * return its GPU VA range to the allocation pool.  Zero-size or
 * never-mapped memdescs are a no-op.  Always returns 0.
 */
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
		return 0;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		/* nommu: gpuaddr was just the physical address */
		memdesc->gpuaddr = 0;
		return 0;
	}
	spin_lock(&pagetable->lock);
	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= memdesc->size;

	spin_unlock(&pagetable->lock);

	/* mask off the low alignment bits so the pool is given back the
	 * exact aligned address handed out by kgsl_mmu_map() */
	gen_pool_free(pagetable->pool,
			memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK,
			memdesc->size);

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
602
603int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
604 struct kgsl_memdesc *memdesc, unsigned int protflags)
605{
606 int result = -EINVAL;
607 unsigned int gpuaddr = 0;
608
609 if (memdesc == NULL) {
610 KGSL_CORE_ERR("invalid memdesc\n");
611 goto error;
612 }
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600613 /* Not all global mappings are needed for all MMU types */
614 if (!memdesc->size)
615 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700616
617 gpuaddr = memdesc->gpuaddr;
618
619 result = kgsl_mmu_map(pagetable, memdesc, protflags);
620 if (result)
621 goto error;
622
623 /*global mappings must have the same gpu address in all pagetables*/
624 if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
625 KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x"
626 "gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr,
627 gpuaddr, memdesc->gpuaddr);
628 goto error_unmap;
629 }
630 return result;
631error_unmap:
632 kgsl_mmu_unmap(pagetable, memdesc);
633error:
634 return result;
635}
636EXPORT_SYMBOL(kgsl_mmu_map_global);
637
638int kgsl_mmu_stop(struct kgsl_device *device)
639{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700640 struct kgsl_mmu *mmu = &device->mmu;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600641
642 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
643 return 0;
644 else
645 return mmu->mmu_ops->mmu_stop(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700646}
647EXPORT_SYMBOL(kgsl_mmu_stop);
648
649int kgsl_mmu_close(struct kgsl_device *device)
650{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700651 struct kgsl_mmu *mmu = &device->mmu;
652
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600653 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
654 return 0;
655 else
656 return mmu->mmu_ops->mmu_close(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700657}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600658EXPORT_SYMBOL(kgsl_mmu_close);
659
660int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
661 enum kgsl_deviceid id)
662{
663 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
664 return pt->pt_ops->mmu_pt_get_flags(pt, id);
665 else
666 return 0;
667}
668EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
669
670void kgsl_mmu_ptpool_destroy(void *ptpool)
671{
672 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
673 kgsl_gpummu_ptpool_destroy(ptpool);
674 ptpool = 0;
675}
676EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
677
678void *kgsl_mmu_ptpool_init(int ptsize, int entries)
679{
680 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
681 return kgsl_gpummu_ptpool_init(ptsize, entries);
682 else
683 return (void *)(-1);
684}
685EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
686
687int kgsl_mmu_enabled(void)
688{
689 if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
690 return 1;
691 else
692 return 0;
693}
694EXPORT_SYMBOL(kgsl_mmu_enabled);
695
696int kgsl_mmu_pt_equal(struct kgsl_pagetable *pt,
697 unsigned int pt_base)
698{
699 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
700 return true;
701 else
702 return pt->pt_ops->mmu_pt_equal(pt, pt_base);
703}
704EXPORT_SYMBOL(kgsl_mmu_pt_equal);
705
/* Report which MMU type (gpummu/iommu/nommu) was selected at boot. */
enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
{
	return kgsl_mmu_type;
}
EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
711
712void kgsl_mmu_set_mmutype(char *mmutype)
713{
Jeremy Gebben32660362011-11-03 09:59:51 -0600714 kgsl_mmu_type = iommu_found() ? KGSL_MMU_TYPE_IOMMU : KGSL_MMU_TYPE_GPU;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600715 if (mmutype && !strncmp(mmutype, "gpummu", 6))
716 kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
717 if (iommu_found() && mmutype && !strncmp(mmutype, "iommu", 5))
718 kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
719 if (mmutype && !strncmp(mmutype, "nommu", 5))
720 kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
721}
722EXPORT_SYMBOL(kgsl_mmu_set_mmutype);