blob: ff9f0b8088806a5f14b12ed01f734761b151d15d [file] [log] [blame]
Jordan Crouse00714012012-03-16 14:53:40 -06001/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
Steve Mucklef132c6c2012-06-06 18:30:57 -070013#include <linux/export.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070014#include <linux/types.h>
15#include <linux/device.h>
16#include <linux/spinlock.h>
17#include <linux/genalloc.h>
18#include <linux/slab.h>
19#include <linux/sched.h>
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060020#include <linux/iommu.h>
Jordan Crouse817e0b92012-02-04 10:23:53 -070021#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022
23#include "kgsl.h"
24#include "kgsl_mmu.h"
25#include "kgsl_device.h"
26#include "kgsl_sharedmem.h"
Jeremy Gebbena3d07a42011-10-17 12:08:16 -060027#include "adreno_postmortem.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070028
/* GPU virtual allocations are aligned to 8K (1 << 13) boundaries */
#define KGSL_MMU_ALIGN_SHIFT	13
#define KGSL_MMU_ALIGN_MASK	(~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))

/* MMU backend selected once at boot by kgsl_mmu_set_mmutype() */
static enum kgsl_mmutype kgsl_mmu_type;

static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
35
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070036static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
37{
38 int i;
Shubhraprakash Das84fdb112012-04-04 12:49:31 -060039 /* For IOMMU only unmap the global structures to global pt */
Shubhraprakash Das6b30c9f2012-04-20 01:15:55 -060040 if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
41 (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
Shubhraprakash Das84fdb112012-04-04 12:49:31 -060042 (KGSL_MMU_GLOBAL_PT != pt->name))
43 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070044 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
45 struct kgsl_device *device = kgsl_driver.devp[i];
46 if (device)
47 device->ftbl->cleanup_pt(device, pt);
48 }
49 return 0;
50}
51
Shubhraprakash Das6b30c9f2012-04-20 01:15:55 -060052
53static int kgsl_setup_pt(struct kgsl_pagetable *pt)
54{
55 int i = 0;
56 int status = 0;
57
58 /* For IOMMU only map the global structures to global pt */
59 if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
60 (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
61 (KGSL_MMU_GLOBAL_PT != pt->name))
62 return 0;
63 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
64 struct kgsl_device *device = kgsl_driver.devp[i];
65 if (device) {
66 status = device->ftbl->setup_pt(device, pt);
67 if (status)
68 goto error_pt;
69 }
70 }
71 return status;
72error_pt:
73 while (i >= 0) {
74 struct kgsl_device *device = kgsl_driver.devp[i];
75 if (device)
76 device->ftbl->cleanup_pt(device, pt);
77 i--;
78 }
79 return status;
80}
81
/*
 * kgsl_destroy_pagetable - kref release callback for a pagetable
 * @kref: embedded refcount of the kgsl_pagetable being released
 *
 * Runs when the last reference is dropped: unlinks the pagetable from
 * the driver list (so no new lookups can find it), removes its sysfs
 * nodes, unwinds the per-device entries, destroys the address-space
 * allocators and the backend pagetable, then frees the structure.
 */
static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	/* Unlink first so concurrent lookups can no longer see it */
	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	/* kgsl_pool only exists for the IOMMU global pagetable */
	if (pagetable->kgsl_pool)
		gen_pool_destroy(pagetable->kgsl_pool);
	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);

	kfree(pagetable);
}
105
106static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
107{
108 if (pagetable)
109 kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
110}
111
112static struct kgsl_pagetable *
113kgsl_get_pagetable(unsigned long name)
114{
115 struct kgsl_pagetable *pt, *ret = NULL;
116 unsigned long flags;
117
118 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
119 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
120 if (pt->name == name) {
121 ret = pt;
122 kref_get(&ret->refcount);
123 break;
124 }
125 }
126
127 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
128 return ret;
129}
130
131static struct kgsl_pagetable *
132_get_pt_from_kobj(struct kobject *kobj)
133{
134 unsigned long ptname;
135
136 if (!kobj)
137 return NULL;
138
139 if (sscanf(kobj->name, "%ld", &ptname) != 1)
140 return NULL;
141
142 return kgsl_get_pagetable(ptname);
143}
144
145static ssize_t
146sysfs_show_entries(struct kobject *kobj,
147 struct kobj_attribute *attr,
148 char *buf)
149{
150 struct kgsl_pagetable *pt;
151 int ret = 0;
152
153 pt = _get_pt_from_kobj(kobj);
154
155 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600156 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700157
158 kgsl_put_pagetable(pt);
159 return ret;
160}
161
162static ssize_t
163sysfs_show_mapped(struct kobject *kobj,
164 struct kobj_attribute *attr,
165 char *buf)
166{
167 struct kgsl_pagetable *pt;
168 int ret = 0;
169
170 pt = _get_pt_from_kobj(kobj);
171
172 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600173 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700174
175 kgsl_put_pagetable(pt);
176 return ret;
177}
178
179static ssize_t
180sysfs_show_va_range(struct kobject *kobj,
181 struct kobj_attribute *attr,
182 char *buf)
183{
184 struct kgsl_pagetable *pt;
185 int ret = 0;
186
187 pt = _get_pt_from_kobj(kobj);
188
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600189 if (pt) {
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600190 ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600191 kgsl_mmu_get_ptsize());
192 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700193
194 kgsl_put_pagetable(pt);
195 return ret;
196}
197
198static ssize_t
199sysfs_show_max_mapped(struct kobject *kobj,
200 struct kobj_attribute *attr,
201 char *buf)
202{
203 struct kgsl_pagetable *pt;
204 int ret = 0;
205
206 pt = _get_pt_from_kobj(kobj);
207
208 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600209 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700210
211 kgsl_put_pagetable(pt);
212 return ret;
213}
214
215static ssize_t
216sysfs_show_max_entries(struct kobject *kobj,
217 struct kobj_attribute *attr,
218 char *buf)
219{
220 struct kgsl_pagetable *pt;
221 int ret = 0;
222
223 pt = _get_pt_from_kobj(kobj);
224
225 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600226 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700227
228 kgsl_put_pagetable(pt);
229 return ret;
230}
231
/* Read-only sysfs attributes exported under each pagetable's directory */
static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

/* NULL-terminated list handed to sysfs_create_group() */
static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};
274
275static void
276pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
277{
278 if (pagetable->kobj)
279 sysfs_remove_group(pagetable->kobj,
280 &pagetable_attr_group);
281
282 kobject_put(pagetable->kobj);
283}
284
285static int
286pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
287{
288 char ptname[16];
289 int ret = -ENOMEM;
290
291 snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
292 pagetable->kobj = kobject_create_and_add(ptname,
293 kgsl_driver.ptkobj);
294 if (pagetable->kobj == NULL)
295 goto err;
296
297 ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);
298
299err:
300 if (ret) {
301 if (pagetable->kobj)
302 kobject_put(pagetable->kobj);
303
304 pagetable->kobj = NULL;
305 }
306
307 return ret;
308}
309
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600310unsigned int kgsl_mmu_get_ptsize(void)
311{
312 /*
313 * For IOMMU, we could do up to 4G virtual range if we wanted to, but
314 * it makes more sense to return a smaller range and leave the rest of
315 * the virtual range for future improvements
316 */
317
318 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
319 return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
320 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
321 return SZ_2G;
322 else
323 return 0;
324}
325
/*
 * kgsl_mmu_get_ptname_from_ptbase - resolve a hardware pagetable base
 * to the owning pagetable's id
 * @pt_base: pagetable base address as read back from the MMU
 *
 * Scans the registered pagetables under the driver lock.  Returns the
 * matching pagetable name, or -1 if no registered pagetable matches.
 */
int
kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base)
{
	struct kgsl_pagetable *pt;
	int ptid = -1;

	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		/* the backend decides what "equal" means for its format */
		if (pt->pt_ops->mmu_pt_equal(pt, pt_base)) {
			ptid = (int) pt->name;
			break;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ptid;
}
EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600344
/*
 * kgsl_mmu_init - one-time MMU initialization for a device
 * @device: device whose MMU is being set up
 *
 * Allocates the setstate scratch page, selects the mmu_ops backend
 * matching the boot-time MMU type and runs its init hook.  On any
 * failure the scratch page is released again.  Returns 0 on success.
 */
int kgsl_mmu_init(struct kgsl_device *device)
{
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;
	/* one page of contiguous scratch memory used for setstate ops */
	status = kgsl_allocate_contiguous(&mmu->setstate_memory, PAGE_SIZE);
	if (status)
		return status;
	kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
				mmu->setstate_memory.size);

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
		dev_info(device->dev, "|%s| MMU type set for device is "
				"NOMMU\n", __func__);
		/* no backend to initialize; status stays 0 */
		goto done;
	} else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		mmu->mmu_ops = &gpummu_ops;
	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		mmu->mmu_ops = &iommu_ops;

	status = mmu->mmu_ops->mmu_init(mmu);
done:
	/* unwind the scratch page allocation on any failure */
	if (status)
		kgsl_sharedmem_free(&mmu->setstate_memory);
	return status;
}
EXPORT_SYMBOL(kgsl_mmu_init);
373
/*
 * kgsl_mmu_start - bring up the MMU for a device
 * @device: device whose MMU should be started
 *
 * In NOMMU mode the MH MMU is disabled and the global buffers get
 * their gpu addresses assigned once (kgsl_setup_pt(NULL) walks the
 * devices with a NULL pagetable); otherwise the backend start hook
 * does the work.  Returns 0 on success.
 */
int kgsl_mmu_start(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		kgsl_regwrite(device, MH_MMU_CONFIG, 0);
		/* Setup gpuaddr of global mappings */
		if (!mmu->setstate_memory.gpuaddr)
			kgsl_setup_pt(NULL);
		return 0;
	} else {
		return mmu->mmu_ops->mmu_start(mmu);
	}
}
EXPORT_SYMBOL(kgsl_mmu_start);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600389
/*
 * mh_axi_error - log the details of an MH AXI read/write error
 * @device: device that raised the interrupt
 * @type: "read" or "write", interpolated into the log message
 *
 * Dumps the AXI error register, the current pagetable base and the
 * faulting GPU virtual/physical addresses from the MH debug mux.
 */
static void mh_axi_error(struct kgsl_device *device, const char* type)
{
	unsigned int reg, gpu_err, phys_err, pt_base;

	kgsl_regread(device, MH_AXI_ERROR, &reg);
	pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
	/*
	 * Read gpu virtual and physical addresses that
	 * caused the error from the debug data.
	 */
	/* NOTE(review): mux selectors 44/45 presumably pick the GPU
	 * virtual / physical error address slots — confirm against the
	 * hardware documentation */
	kgsl_regwrite(device, MH_DEBUG_CTRL, 44);
	kgsl_regread(device, MH_DEBUG_DATA, &gpu_err);
	kgsl_regwrite(device, MH_DEBUG_CTRL, 45);
	kgsl_regread(device, MH_DEBUG_DATA, &phys_err);
	KGSL_MEM_CRIT(device,
			"axi %s error: %08x pt %08x gpu %08x phys %08x\n",
			type, reg, pt_base, gpu_err, phys_err);
}
408
/*
 * kgsl_mh_intrcallback - MH (memory hub) interrupt handler
 * @device: device that raised the interrupt
 *
 * Decodes AXI read/write errors and MMU page faults, dispatches page
 * faults to the active MMU backend, then acknowledges only the bits
 * this driver handles.
 */
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		mh_axi_error(device, "read");
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		mh_axi_error(device, "write");
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		device->mmu.mmu_ops->mmu_pagefault(&device->mmu);

	/* clear only the interrupt bits the driver is responsible for */
	status &= KGSL_MMU_INT_MASK;
	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);
426
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700427static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
428 unsigned int name)
429{
430 int status = 0;
431 struct kgsl_pagetable *pagetable = NULL;
432 unsigned long flags;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600433 unsigned int ptsize;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700434
435 pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
436 if (pagetable == NULL) {
437 KGSL_CORE_ERR("kzalloc(%d) failed\n",
438 sizeof(struct kgsl_pagetable));
439 return NULL;
440 }
441
442 kref_init(&pagetable->refcount);
443
444 spin_lock_init(&pagetable->lock);
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600445
446 ptsize = kgsl_mmu_get_ptsize();
447
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700448 pagetable->name = name;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600449 pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700450
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600451 /*
452 * create a separate kgsl pool for IOMMU, global mappings can be mapped
453 * just once from this pool of the defaultpagetable
454 */
455 if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
456 (KGSL_MMU_GLOBAL_PT == name)) {
Jeremy Gebbenc589ccb2012-05-16 10:26:20 -0600457 pagetable->kgsl_pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT,
458 -1);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600459 if (pagetable->kgsl_pool == NULL) {
460 KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
Jeremy Gebbenc589ccb2012-05-16 10:26:20 -0600461 KGSL_MMU_ALIGN_SHIFT);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600462 goto err_alloc;
463 }
464 if (gen_pool_add(pagetable->kgsl_pool,
465 KGSL_IOMMU_GLOBAL_MEM_BASE,
466 KGSL_IOMMU_GLOBAL_MEM_SIZE, -1)) {
467 KGSL_CORE_ERR("gen_pool_add failed\n");
468 goto err_kgsl_pool;
469 }
470 }
471
Jeremy Gebbenc589ccb2012-05-16 10:26:20 -0600472 pagetable->pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT, -1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700473 if (pagetable->pool == NULL) {
Jeremy Gebbenc589ccb2012-05-16 10:26:20 -0600474 KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
475 KGSL_MMU_ALIGN_SHIFT);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600476 goto err_kgsl_pool;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700477 }
478
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600479 if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600480 ptsize, -1)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700481 KGSL_CORE_ERR("gen_pool_add failed\n");
482 goto err_pool;
483 }
484
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600485 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
486 pagetable->pt_ops = &gpummu_pt_ops;
487 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
488 pagetable->pt_ops = &iommu_pt_ops;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700489
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600490 pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
491 if (!pagetable->priv)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700492 goto err_pool;
493
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700494 status = kgsl_setup_pt(pagetable);
495 if (status)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600496 goto err_mmu_create;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700497
498 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
499 list_add(&pagetable->list, &kgsl_driver.pagetable_list);
500 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
501
502 /* Create the sysfs entries */
503 pagetable_add_sysfs_objects(pagetable);
504
505 return pagetable;
506
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600507err_mmu_create:
508 pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700509err_pool:
510 gen_pool_destroy(pagetable->pool);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600511err_kgsl_pool:
512 if (pagetable->kgsl_pool)
513 gen_pool_destroy(pagetable->kgsl_pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700514err_alloc:
515 kfree(pagetable);
516
517 return NULL;
518}
519
/*
 * kgsl_mmu_getpagetable - find or create a pagetable by name
 * @name: pagetable id; collapsed to the global pagetable when
 * per-process pagetables are disabled
 *
 * Returns a referenced pagetable, NULL on allocation failure, or the
 * non-NULL sentinel (void *)(-1) in NOMMU mode (callers only test for
 * non-NULL; the sentinel must never be dereferenced).
 */
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return (void *)(-1);

#ifndef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
	name = KGSL_MMU_GLOBAL_PT;
#endif
	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}
537
/* Release a reference obtained from kgsl_mmu_getpagetable(). */
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700543
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600544void kgsl_setstate(struct kgsl_mmu *mmu, uint32_t flags)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700545{
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600546 struct kgsl_device *device = mmu->device;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600547 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
548 return;
Shubhraprakash Dasc6e21012012-05-11 17:24:51 -0600549 else if (device->ftbl->setstate)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700550 device->ftbl->setstate(device, flags);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600551 else if (mmu->mmu_ops->mmu_device_setstate)
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600552 mmu->mmu_ops->mmu_device_setstate(mmu, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700553}
554EXPORT_SYMBOL(kgsl_setstate);
555
/*
 * kgsl_mh_start - program the memory hub registers for a device
 * @device: device whose MH is configured
 *
 * Disables the MMU, waits for idle, then programs the MPU address
 * range and arbiter/interface configuration from the per-device mh
 * settings.  The register write order follows the hardware bring-up
 * sequence and should not be rearranged.
 */
void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force mmu off to for now*/
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
			mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	/* optional interface tuning; zero means "leave hardware default" */
	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
				mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
				mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}
582
Jeremy Gebben1b9b1f142012-05-16 10:43:28 -0600583static inline struct gen_pool *
584_get_pool(struct kgsl_pagetable *pagetable, unsigned int flags)
585{
586 if (pagetable->kgsl_pool &&
587 (KGSL_MEMFLAGS_GLOBAL & flags))
588 return pagetable->kgsl_pool;
589 return pagetable->pool;
590}
591
/*
 * kgsl_mmu_map - allocate a GPU virtual range and map a buffer into it
 * @pagetable: pagetable to map into
 * @memdesc: buffer descriptor with a populated sg list
 * @protflags: hardware protection flags for the mapping
 *
 * On success memdesc->gpuaddr holds the assigned GPU address.
 * Returns 0 on success or a negative error code.
 */
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
				struct kgsl_memdesc *memdesc,
				unsigned int protflags)
{
	int ret;
	struct gen_pool *pool;
	int size;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		/* Without an MMU the gpu address is simply the physical
		 * (or DMA) address, which requires one contiguous entry */
		if (memdesc->sglen == 1) {
			memdesc->gpuaddr = sg_dma_address(memdesc->sg);
			if (!memdesc->gpuaddr)
				memdesc->gpuaddr = sg_phys(memdesc->sg);
			if (!memdesc->gpuaddr) {
				KGSL_CORE_ERR("Unable to get a valid physical "
					"address for memdesc\n");
				return -EINVAL;
			}
			return 0;
		} else {
			KGSL_CORE_ERR("Memory is not contigious "
				"(sglen = %d)\n", memdesc->sglen);
			return -EINVAL;
		}
	}

	size = kgsl_sg_size(memdesc->sg, memdesc->sglen);

	/* Allocate from kgsl pool if it exists for global mappings */
	pool = _get_pool(pagetable, memdesc->priv);

	memdesc->gpuaddr = gen_pool_alloc(pool, size);
	if (memdesc->gpuaddr == 0) {
		KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
			size,
			(pool == pagetable->kgsl_pool) ?
			"kgsl_pool" : "general_pool");
		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
			pagetable->name, pagetable->stats.mapped,
			pagetable->stats.entries);
		return -ENOMEM;
	}

	/*
	 * NOTE(review): for GPUMMU the pagetable lock is held across the
	 * map; for IOMMU it is taken only afterwards to protect the stats
	 * update — presumably the IOMMU backend serializes its own
	 * pagetable writes; confirm before restructuring.
	 */
	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags,
						&pagetable->tlb_flags);
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);

	if (ret)
		goto err_free_gpuaddr;

	/* Keep track of the statistics for the sysfs files */

	KGSL_STATS_ADD(1, pagetable->stats.entries,
		       pagetable->stats.max_entries);

	KGSL_STATS_ADD(size, pagetable->stats.mapped,
		       pagetable->stats.max_mapped);

	spin_unlock(&pagetable->lock);

	return 0;

err_free_gpuaddr:
	spin_unlock(&pagetable->lock);
	/* return the virtual range and invalidate the descriptor */
	gen_pool_free(pool, memdesc->gpuaddr, size);
	memdesc->gpuaddr = 0;
	return ret;
}
EXPORT_SYMBOL(kgsl_mmu_map);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700665
/*
 * kgsl_mmu_unmap - remove a mapping and release its GPU virtual range
 * @pagetable: pagetable the buffer was mapped in
 * @memdesc: descriptor previously passed to kgsl_mmu_map()
 *
 * Always returns 0; unmapping a never-mapped descriptor is a no-op.
 */
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	struct gen_pool *pool;
	int size;

	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
		return 0;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		/* NOMMU gpuaddrs are raw physical addresses; just forget */
		memdesc->gpuaddr = 0;
		return 0;
	}

	size = kgsl_sg_size(memdesc->sg, memdesc->sglen);

	/*
	 * NOTE(review): as in kgsl_mmu_map(), the lock is held across the
	 * unmap for GPUMMU but taken only afterwards for IOMMU (stats
	 * protection) — confirm the IOMMU backend serializes itself.
	 */
	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= size;

	spin_unlock(&pagetable->lock);

	pool = _get_pool(pagetable, memdesc->priv);
	gen_pool_free(pool, memdesc->gpuaddr, size);

	/*
	 * Don't clear the gpuaddr on global mappings because they
	 * may be in use by other pagetables
	 */
	if (!(memdesc->priv & KGSL_MEMFLAGS_GLOBAL))
		memdesc->gpuaddr = 0;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
706
707int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
708 struct kgsl_memdesc *memdesc, unsigned int protflags)
709{
710 int result = -EINVAL;
711 unsigned int gpuaddr = 0;
712
713 if (memdesc == NULL) {
714 KGSL_CORE_ERR("invalid memdesc\n");
715 goto error;
716 }
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600717 /* Not all global mappings are needed for all MMU types */
718 if (!memdesc->size)
719 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700720
721 gpuaddr = memdesc->gpuaddr;
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600722 memdesc->priv |= KGSL_MEMFLAGS_GLOBAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700723
724 result = kgsl_mmu_map(pagetable, memdesc, protflags);
725 if (result)
726 goto error;
727
728 /*global mappings must have the same gpu address in all pagetables*/
729 if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
730 KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x"
731 "gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr,
732 gpuaddr, memdesc->gpuaddr);
733 goto error_unmap;
734 }
735 return result;
736error_unmap:
737 kgsl_mmu_unmap(pagetable, memdesc);
738error:
739 return result;
740}
741EXPORT_SYMBOL(kgsl_mmu_map_global);
742
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700743int kgsl_mmu_close(struct kgsl_device *device)
744{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700745 struct kgsl_mmu *mmu = &device->mmu;
746
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600747 kgsl_sharedmem_free(&mmu->setstate_memory);
748 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600749 return 0;
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600750 else
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600751 return mmu->mmu_ops->mmu_close(mmu);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700752}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600753EXPORT_SYMBOL(kgsl_mmu_close);
754
755int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
756 enum kgsl_deviceid id)
757{
Shubhraprakash Dasf764e462012-04-26 15:38:09 -0600758 unsigned int result = 0;
759
760 if (pt == NULL)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600761 return 0;
Shubhraprakash Dasf764e462012-04-26 15:38:09 -0600762
763 spin_lock(&pt->lock);
764 if (pt->tlb_flags && (1<<id)) {
765 result = KGSL_MMUFLAGS_TLBFLUSH;
766 pt->tlb_flags &= ~(1<<id);
767 }
768 spin_unlock(&pt->lock);
769 return result;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600770}
771EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
772
773void kgsl_mmu_ptpool_destroy(void *ptpool)
774{
775 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
776 kgsl_gpummu_ptpool_destroy(ptpool);
777 ptpool = 0;
778}
779EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
780
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600781void *kgsl_mmu_ptpool_init(int entries)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600782{
783 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600784 return kgsl_gpummu_ptpool_init(entries);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600785 else
786 return (void *)(-1);
787}
788EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
789
790int kgsl_mmu_enabled(void)
791{
792 if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
793 return 1;
794 else
795 return 0;
796}
797EXPORT_SYMBOL(kgsl_mmu_enabled);
798
/* Return the MMU type selected at boot by kgsl_mmu_set_mmutype(). */
enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
{
	return kgsl_mmu_type;
}
EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
804
/*
 * kgsl_mmu_set_mmutype - choose the MMU backend at driver load
 * @mmutype: optional override string ("gpummu", "iommu" or "nommu"),
 * may be NULL
 *
 * Checks are applied in priority order, each overriding the last:
 * platform default, probed IOMMU presence, then the explicit request.
 * The ordering is significant — do not rearrange.
 */
void kgsl_mmu_set_mmutype(char *mmutype)
{
	/* Set the default MMU - GPU on <=8960 and nothing on >= 8064 */
	kgsl_mmu_type =
		cpu_is_apq8064() ? KGSL_MMU_TYPE_NONE : KGSL_MMU_TYPE_GPU;

	/* Use the IOMMU if it is found */
	if (iommu_present(&platform_bus_type))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;

	/* explicit requests win; "iommu" only honored when one exists */
	if (mmutype && !strncmp(mmutype, "gpummu", 6))
		kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
	if (iommu_present(&platform_bus_type) && mmutype &&
		!strncmp(mmutype, "iommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
	if (mmutype && !strncmp(mmutype, "nommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
}
EXPORT_SYMBOL(kgsl_mmu_set_mmutype);