blob: 9092b96a9dd4a3e0e58b6978322a49c223fc79bd [file] [log] [blame]
Jordan Crouse00714012012-03-16 14:53:40 -06001/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/types.h>
14#include <linux/device.h>
15#include <linux/spinlock.h>
16#include <linux/genalloc.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060019#include <linux/iommu.h>
Jordan Crouse817e0b92012-02-04 10:23:53 -070020#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021
22#include "kgsl.h"
23#include "kgsl_mmu.h"
24#include "kgsl_device.h"
25#include "kgsl_sharedmem.h"
Jeremy Gebbena3d07a42011-10-17 12:08:16 -060026#include "adreno_postmortem.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027
28#define KGSL_MMU_ALIGN_SHIFT 13
29#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
30
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060031static enum kgsl_mmutype kgsl_mmu_type;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070032
33static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
34
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
36{
37 int i;
Shubhraprakash Das84fdb112012-04-04 12:49:31 -060038 /* For IOMMU only unmap the global structures to global pt */
Shubhraprakash Das6b30c9f2012-04-20 01:15:55 -060039 if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
40 (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
Shubhraprakash Das84fdb112012-04-04 12:49:31 -060041 (KGSL_MMU_GLOBAL_PT != pt->name))
42 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070043 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
44 struct kgsl_device *device = kgsl_driver.devp[i];
45 if (device)
46 device->ftbl->cleanup_pt(device, pt);
47 }
48 return 0;
49}
50
Shubhraprakash Das6b30c9f2012-04-20 01:15:55 -060051
52static int kgsl_setup_pt(struct kgsl_pagetable *pt)
53{
54 int i = 0;
55 int status = 0;
56
57 /* For IOMMU only map the global structures to global pt */
58 if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
59 (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
60 (KGSL_MMU_GLOBAL_PT != pt->name))
61 return 0;
62 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
63 struct kgsl_device *device = kgsl_driver.devp[i];
64 if (device) {
65 status = device->ftbl->setup_pt(device, pt);
66 if (status)
67 goto error_pt;
68 }
69 }
70 return status;
71error_pt:
72 while (i >= 0) {
73 struct kgsl_device *device = kgsl_driver.devp[i];
74 if (device)
75 device->ftbl->cleanup_pt(device, pt);
76 i--;
77 }
78 return status;
79}
80
/*
 * kgsl_destroy_pagetable - kref release callback for a pagetable
 * @kref: embedded refcount inside the struct kgsl_pagetable
 *
 * Runs when the last reference is dropped: unlinks the pagetable from
 * the driver-wide list (so lookups can no longer find it), removes its
 * sysfs nodes, runs per-device cleanup, destroys the allocation pools
 * and the backend pagetable, then frees the container.
 */
static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	/* Unlink under ptlock so concurrent lookups never see a dying pt */
	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	/* kgsl_pool exists only for the IOMMU global pagetable */
	if (pagetable->kgsl_pool)
		gen_pool_destroy(pagetable->kgsl_pool);
	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);

	kfree(pagetable);
}
104
105static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
106{
107 if (pagetable)
108 kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
109}
110
111static struct kgsl_pagetable *
112kgsl_get_pagetable(unsigned long name)
113{
114 struct kgsl_pagetable *pt, *ret = NULL;
115 unsigned long flags;
116
117 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
118 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
119 if (pt->name == name) {
120 ret = pt;
121 kref_get(&ret->refcount);
122 break;
123 }
124 }
125
126 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
127 return ret;
128}
129
130static struct kgsl_pagetable *
131_get_pt_from_kobj(struct kobject *kobj)
132{
133 unsigned long ptname;
134
135 if (!kobj)
136 return NULL;
137
138 if (sscanf(kobj->name, "%ld", &ptname) != 1)
139 return NULL;
140
141 return kgsl_get_pagetable(ptname);
142}
143
144static ssize_t
145sysfs_show_entries(struct kobject *kobj,
146 struct kobj_attribute *attr,
147 char *buf)
148{
149 struct kgsl_pagetable *pt;
150 int ret = 0;
151
152 pt = _get_pt_from_kobj(kobj);
153
154 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600155 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700156
157 kgsl_put_pagetable(pt);
158 return ret;
159}
160
161static ssize_t
162sysfs_show_mapped(struct kobject *kobj,
163 struct kobj_attribute *attr,
164 char *buf)
165{
166 struct kgsl_pagetable *pt;
167 int ret = 0;
168
169 pt = _get_pt_from_kobj(kobj);
170
171 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600172 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700173
174 kgsl_put_pagetable(pt);
175 return ret;
176}
177
178static ssize_t
179sysfs_show_va_range(struct kobject *kobj,
180 struct kobj_attribute *attr,
181 char *buf)
182{
183 struct kgsl_pagetable *pt;
184 int ret = 0;
185
186 pt = _get_pt_from_kobj(kobj);
187
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600188 if (pt) {
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600189 ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600190 kgsl_mmu_get_ptsize());
191 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700192
193 kgsl_put_pagetable(pt);
194 return ret;
195}
196
197static ssize_t
198sysfs_show_max_mapped(struct kobject *kobj,
199 struct kobj_attribute *attr,
200 char *buf)
201{
202 struct kgsl_pagetable *pt;
203 int ret = 0;
204
205 pt = _get_pt_from_kobj(kobj);
206
207 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600208 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700209
210 kgsl_put_pagetable(pt);
211 return ret;
212}
213
214static ssize_t
215sysfs_show_max_entries(struct kobject *kobj,
216 struct kobj_attribute *attr,
217 char *buf)
218{
219 struct kgsl_pagetable *pt;
220 int ret = 0;
221
222 pt = _get_pt_from_kobj(kobj);
223
224 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600225 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700226
227 kgsl_put_pagetable(pt);
228 return ret;
229}
230
/*
 * Read-only sysfs attributes exposing per-pagetable statistics.  Each
 * file appears under the kobject created by pagetable_add_sysfs_objects()
 * and its .show handler resolves the pagetable from the kobject name.
 */
static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

/* NULL-terminated array so the group can be created/removed as a unit */
static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};
273
274static void
275pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
276{
277 if (pagetable->kobj)
278 sysfs_remove_group(pagetable->kobj,
279 &pagetable_attr_group);
280
281 kobject_put(pagetable->kobj);
282}
283
/*
 * pagetable_add_sysfs_objects - create the sysfs stats directory
 * @pagetable: pagetable whose numeric name becomes the directory name
 *
 * Creates a kobject named after the pagetable under kgsl_driver.ptkobj
 * and attaches the statistics attribute group to it.
 *
 * Returns 0 on success or a negative errno; on any failure the kobject
 * (if it was created) is released and pagetable->kobj is left NULL so
 * pagetable_remove_sysfs_objects() remains safe to call.
 */
static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
		kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	/* Shared failure path: ret != 0 here means either step failed */
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}

	return ret;
}
308
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600309unsigned int kgsl_mmu_get_ptsize(void)
310{
311 /*
312 * For IOMMU, we could do up to 4G virtual range if we wanted to, but
313 * it makes more sense to return a smaller range and leave the rest of
314 * the virtual range for future improvements
315 */
316
317 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
318 return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
319 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
320 return SZ_2G;
321 else
322 return 0;
323}
324
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600325int
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600326kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base)
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600327{
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600328 struct kgsl_pagetable *pt;
329 int ptid = -1;
330
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600331 spin_lock(&kgsl_driver.ptlock);
332 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600333 if (pt->pt_ops->mmu_pt_equal(pt, pt_base)) {
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600334 ptid = (int) pt->name;
335 break;
336 }
337 }
338 spin_unlock(&kgsl_driver.ptlock);
339
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600340 return ptid;
341}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600342EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600343
/*
 * kgsl_mmu_init - one-time MMU initialization for a device
 * @device: device whose embedded struct kgsl_mmu is initialized
 *
 * Allocates the page-sized, zeroed setstate scratch buffer, selects the
 * ops table matching the globally configured MMU type, and runs the
 * type-specific init.  In NOMMU mode only the scratch buffer is set up.
 *
 * Returns 0 on success; on failure the scratch buffer is freed before
 * the error is propagated.
 */
int kgsl_mmu_init(struct kgsl_device *device)
{
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;
	status = kgsl_allocate_contiguous(&mmu->setstate_memory, PAGE_SIZE);
	if (status)
		return status;
	kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
			mmu->setstate_memory.size);

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
		dev_info(device->dev, "|%s| MMU type set for device is "
			"NOMMU\n", __func__);
		/* status is still 0 here, so the free below is skipped */
		goto done;
	} else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		mmu->mmu_ops = &gpummu_ops;
	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		mmu->mmu_ops = &iommu_ops;

	status = mmu->mmu_ops->mmu_init(mmu);
done:
	if (status)
		kgsl_sharedmem_free(&mmu->setstate_memory);
	return status;
}
EXPORT_SYMBOL(kgsl_mmu_init);
372
373int kgsl_mmu_start(struct kgsl_device *device)
374{
375 struct kgsl_mmu *mmu = &device->mmu;
376
377 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
378 kgsl_regwrite(device, MH_MMU_CONFIG, 0);
Shubhraprakash Das6b30c9f2012-04-20 01:15:55 -0600379 /* Setup gpuaddr of global mappings */
380 if (!mmu->setstate_memory.gpuaddr)
381 kgsl_setup_pt(NULL);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600382 return 0;
383 } else {
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600384 return mmu->mmu_ops->mmu_start(mmu);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600385 }
386}
387EXPORT_SYMBOL(kgsl_mmu_start);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600388
/*
 * mh_axi_error - log details of an MH AXI read/write error
 * @device: device that raised the interrupt
 * @type: "read" or "write"; used only in the log message
 *
 * Captures the AXI error status, the active pagetable base, and the
 * faulting GPU virtual and physical addresses from the MH debug port,
 * then emits a single critical log line.
 */
static void mh_axi_error(struct kgsl_device *device, const char* type)
{
	unsigned int reg, gpu_err, phys_err, pt_base;

	kgsl_regread(device, MH_AXI_ERROR, &reg);
	pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
	/*
	 * Read gpu virtual and physical addresses that
	 * caused the error from the debug data.
	 * NOTE(review): 44 and 45 appear to be hardware-specific MH
	 * debug-register selectors -- confirm against the MH register
	 * spec for this core.
	 */
	kgsl_regwrite(device, MH_DEBUG_CTRL, 44);
	kgsl_regread(device, MH_DEBUG_DATA, &gpu_err);
	kgsl_regwrite(device, MH_DEBUG_CTRL, 45);
	kgsl_regread(device, MH_DEBUG_DATA, &phys_err);
	KGSL_MEM_CRIT(device,
		"axi %s error: %08x pt %08x gpu %08x phys %08x\n",
		type, reg, pt_base, gpu_err, phys_err);
}
407
/*
 * kgsl_mh_intrcallback - memory-hub interrupt handler
 *
 * Reads MH_INTERRUPT_STATUS, logs AXI read/write errors, forwards page
 * faults to the MMU backend, and acknowledges the bits this driver
 * handles.
 */
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		mh_axi_error(device, "read");
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		mh_axi_error(device, "write");
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		device->mmu.mmu_ops->mmu_pagefault(&device->mmu);

	/* Only clear the interrupt bits this driver actually services */
	status &= KGSL_MMU_INT_MASK;
	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);
425
/*
 * kgsl_mmu_createpagetableobject - allocate and register a new pagetable
 * @name: pagetable identifier (KGSL_MMU_GLOBAL_PT for the global one)
 *
 * Builds the full pagetable object: refcount, lock, GPU-VA allocation
 * pool(s), the backend (gpummu/iommu) pagetable, per-device setup, and
 * finally list registration plus sysfs entries.  Cleanup on failure is
 * handled by the chained error labels at the bottom, which unwind in
 * reverse order of construction.
 *
 * Returns the new pagetable with one reference held, or NULL on error.
 */
static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
				unsigned int name)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	unsigned long flags;
	unsigned int ptsize;

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			sizeof(struct kgsl_pagetable));
		return NULL;
	}

	kref_init(&pagetable->refcount);

	spin_lock_init(&pagetable->lock);

	ptsize = kgsl_mmu_get_ptsize();

	pagetable->name = name;
	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);

	/*
	 * create a separate kgsl pool for IOMMU, global mappings can be mapped
	 * just once from this pool of the defaultpagetable
	 */
	if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
		(KGSL_MMU_GLOBAL_PT == name)) {
		pagetable->kgsl_pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT,
			-1);
		if (pagetable->kgsl_pool == NULL) {
			KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
				KGSL_MMU_ALIGN_SHIFT);
			goto err_alloc;
		}
		if (gen_pool_add(pagetable->kgsl_pool,
			KGSL_IOMMU_GLOBAL_MEM_BASE,
			KGSL_IOMMU_GLOBAL_MEM_SIZE, -1)) {
			KGSL_CORE_ERR("gen_pool_add failed\n");
			goto err_kgsl_pool;
		}
	}

	/* General-purpose GPU virtual address pool for this pagetable */
	pagetable->pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
			KGSL_MMU_ALIGN_SHIFT);
		goto err_kgsl_pool;
	}

	if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
				ptsize, -1)) {
		KGSL_CORE_ERR("gen_pool_add failed\n");
		goto err_pool;
	}

	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		pagetable->pt_ops = &gpummu_pt_ops;
	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		pagetable->pt_ops = &iommu_pt_ops;

	/* Backend-specific pagetable (hardware page tables / iommu domain) */
	pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
	if (!pagetable->priv)
		goto err_pool;

	status = kgsl_setup_pt(pagetable);
	if (status)
		goto err_mmu_create;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	/* Create the sysfs entries */
	pagetable_add_sysfs_objects(pagetable);

	return pagetable;

err_mmu_create:
	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
err_pool:
	gen_pool_destroy(pagetable->pool);
err_kgsl_pool:
	if (pagetable->kgsl_pool)
		gen_pool_destroy(pagetable->kgsl_pool);
err_alloc:
	kfree(pagetable);

	return NULL;
}
518
/*
 * kgsl_mmu_getpagetable - get (or lazily create) a pagetable by name
 * @name: requested pagetable id; coerced to the global pagetable when
 *        per-process pagetables are not configured
 *
 * Returns a referenced pagetable, the sentinel (void *)(-1) in NOMMU
 * mode (callers treat any non-NULL value as success), or NULL if
 * creation failed.
 */
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return (void *)(-1);

#ifndef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
	name = KGSL_MMU_GLOBAL_PT;
#endif
	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}
536
/* Release a reference taken via kgsl_mmu_getpagetable(); NULL is safe. */
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700542
/*
 * kgsl_setstate - apply MMU state change flags (e.g. pagetable switch,
 * TLB flush) for a device
 *
 * Prefers the device-level setstate hook when one exists, otherwise
 * falls back to the MMU backend's device_setstate hook.  No-op in
 * NOMMU mode.
 */
void kgsl_setstate(struct kgsl_mmu *mmu, uint32_t flags)
{
	struct kgsl_device *device = mmu->device;
	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return;
	else if (device->ftbl->setstate)
		device->ftbl->setstate(device, flags);
	else if (mmu->mmu_ops->mmu_device_setstate)
		mmu->mmu_ops->mmu_device_setstate(mmu, flags);
}
EXPORT_SYMBOL(kgsl_setstate);
554
/*
 * kgsl_mh_start - program the memory hub (MH) registers for a device
 *
 * Forces the MMU off, waits for the core to idle, then programs the
 * MPU physical range and the arbiter/interface configuration from the
 * per-device mh settings.  Interrupt enabling is deliberately left to
 * kgsl_pwrctrl_irq().
 */
void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force the mmu off for now */
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
			mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	/* interface configs are optional; 0 means "leave hardware default" */
	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
			mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
			mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}
581
Jeremy Gebben1b9b1f142012-05-16 10:43:28 -0600582static inline struct gen_pool *
583_get_pool(struct kgsl_pagetable *pagetable, unsigned int flags)
584{
585 if (pagetable->kgsl_pool &&
586 (KGSL_MEMFLAGS_GLOBAL & flags))
587 return pagetable->kgsl_pool;
588 return pagetable->pool;
589}
590
/*
 * kgsl_mmu_map - map a memdesc into a pagetable's GPU address space
 * @pagetable: destination pagetable
 * @memdesc: memory descriptor; memdesc->gpuaddr is filled in on success
 * @protflags: protection flags passed through to the backend map hook
 *
 * In NOMMU mode the physical (or DMA) address of a single-entry
 * scatterlist is used directly as the gpuaddr.  Otherwise a GPU virtual
 * range is carved from the appropriate pool, the backend mapping is
 * created, and the sysfs statistics are updated.
 *
 * Returns 0 on success or a negative errno; on backend failure the
 * pool allocation is returned and gpuaddr reset to 0.
 */
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
				struct kgsl_memdesc *memdesc,
				unsigned int protflags)
{
	int ret;
	struct gen_pool *pool;
	int size;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		/* NOMMU can only address physically contiguous memory */
		if (memdesc->sglen == 1) {
			memdesc->gpuaddr = sg_dma_address(memdesc->sg);
			if (!memdesc->gpuaddr)
				memdesc->gpuaddr = sg_phys(memdesc->sg);
			if (!memdesc->gpuaddr) {
				KGSL_CORE_ERR("Unable to get a valid physical "
					"address for memdesc\n");
				return -EINVAL;
			}
			return 0;
		} else {
			KGSL_CORE_ERR("Memory is not contigious "
				"(sglen = %d)\n", memdesc->sglen);
			return -EINVAL;
		}
	}

	size = kgsl_sg_size(memdesc->sg, memdesc->sglen);

	/* Allocate from kgsl pool if it exists for global mappings */
	pool = _get_pool(pagetable, memdesc->priv);

	memdesc->gpuaddr = gen_pool_alloc(pool, size);
	if (memdesc->gpuaddr == 0) {
		KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
			size,
			(pool == pagetable->kgsl_pool) ?
			"kgsl_pool" : "general_pool");
		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
			pagetable->name, pagetable->stats.mapped,
			pagetable->stats.entries);
		return -ENOMEM;
	}

	/*
	 * Lock placement is asymmetric on purpose: for non-IOMMU the
	 * backend map runs under pagetable->lock, while for IOMMU the
	 * lock is only taken after the map call (presumably because the
	 * IOMMU map path cannot run under a spinlock -- confirm against
	 * the iommu_pt_ops implementation).  Either way the lock is held
	 * for the stats update and released below.
	 */
	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags,
					&pagetable->tlb_flags);
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);

	if (ret)
		goto err_free_gpuaddr;

	/* Keep track of the statistics for the sysfs files */

	KGSL_STATS_ADD(1, pagetable->stats.entries,
		pagetable->stats.max_entries);

	KGSL_STATS_ADD(size, pagetable->stats.mapped,
		pagetable->stats.max_mapped);

	spin_unlock(&pagetable->lock);

	return 0;

err_free_gpuaddr:
	spin_unlock(&pagetable->lock);
	gen_pool_free(pool, memdesc->gpuaddr, size);
	memdesc->gpuaddr = 0;
	return ret;
}
EXPORT_SYMBOL(kgsl_mmu_map);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700664
/*
 * kgsl_mmu_unmap - remove a memdesc's mapping from a pagetable
 *
 * Reverses kgsl_mmu_map(): tears down the backend mapping, updates the
 * sysfs statistics, and returns the GPU virtual range to its pool.
 * A memdesc that was never mapped (size or gpuaddr of 0) is a no-op.
 * Always returns 0.
 */
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	struct gen_pool *pool;
	int size;

	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
		return 0;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		memdesc->gpuaddr = 0;
		return 0;
	}

	size = kgsl_sg_size(memdesc->sg, memdesc->sglen);

	/*
	 * Same asymmetric locking as kgsl_mmu_map(): the IOMMU unmap
	 * path runs outside pagetable->lock; the lock is held for the
	 * stats update either way.
	 */
	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= size;

	spin_unlock(&pagetable->lock);

	pool = _get_pool(pagetable, memdesc->priv);
	gen_pool_free(pool, memdesc->gpuaddr, size);

	/*
	 * Don't clear the gpuaddr on global mappings because they
	 * may be in use by other pagetables
	 */
	if (!(memdesc->priv & KGSL_MEMFLAGS_GLOBAL))
		memdesc->gpuaddr = 0;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
705
/*
 * kgsl_mmu_map_global - map a memdesc that must live at the same GPU
 * address in every pagetable
 *
 * Marks the memdesc global and maps it; if it was already mapped in
 * another pagetable, verifies the new mapping landed at the identical
 * gpuaddr and unmaps on mismatch.  A zero-sized memdesc is accepted as
 * a no-op because not all global mappings are needed for all MMU types.
 *
 * Returns 0 on success or a negative errno.
 */
int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
			struct kgsl_memdesc *memdesc, unsigned int protflags)
{
	int result = -EINVAL;
	unsigned int gpuaddr = 0;

	if (memdesc == NULL) {
		KGSL_CORE_ERR("invalid memdesc\n");
		goto error;
	}
	/* Not all global mappings are needed for all MMU types */
	if (!memdesc->size)
		return 0;

	/* Remember any address assigned by a previous pagetable's map */
	gpuaddr = memdesc->gpuaddr;
	memdesc->priv |= KGSL_MEMFLAGS_GLOBAL;

	result = kgsl_mmu_map(pagetable, memdesc, protflags);
	if (result)
		goto error;

	/*global mappings must have the same gpu address in all pagetables*/
	if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
		KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x"
			"gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr,
			gpuaddr, memdesc->gpuaddr);
		goto error_unmap;
	}
	return result;
error_unmap:
	kgsl_mmu_unmap(pagetable, memdesc);
error:
	return result;
}
EXPORT_SYMBOL(kgsl_mmu_map_global);
741
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700742int kgsl_mmu_close(struct kgsl_device *device)
743{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700744 struct kgsl_mmu *mmu = &device->mmu;
745
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600746 kgsl_sharedmem_free(&mmu->setstate_memory);
747 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600748 return 0;
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600749 else
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600750 return mmu->mmu_ops->mmu_close(mmu);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700751}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600752EXPORT_SYMBOL(kgsl_mmu_close);
753
754int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
755 enum kgsl_deviceid id)
756{
Shubhraprakash Dasf764e462012-04-26 15:38:09 -0600757 unsigned int result = 0;
758
759 if (pt == NULL)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600760 return 0;
Shubhraprakash Dasf764e462012-04-26 15:38:09 -0600761
762 spin_lock(&pt->lock);
763 if (pt->tlb_flags && (1<<id)) {
764 result = KGSL_MMUFLAGS_TLBFLUSH;
765 pt->tlb_flags &= ~(1<<id);
766 }
767 spin_unlock(&pt->lock);
768 return result;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600769}
770EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
771
772void kgsl_mmu_ptpool_destroy(void *ptpool)
773{
774 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
775 kgsl_gpummu_ptpool_destroy(ptpool);
776 ptpool = 0;
777}
778EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
779
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600780void *kgsl_mmu_ptpool_init(int entries)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600781{
782 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600783 return kgsl_gpummu_ptpool_init(entries);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600784 else
785 return (void *)(-1);
786}
787EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
788
789int kgsl_mmu_enabled(void)
790{
791 if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
792 return 1;
793 else
794 return 0;
795}
796EXPORT_SYMBOL(kgsl_mmu_enabled);
797
/* Return the MMU type selected at boot by kgsl_mmu_set_mmutype(). */
enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
{
	return kgsl_mmu_type;
}
EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
803
/*
 * kgsl_mmu_set_mmutype - choose the MMU type at driver init
 * @mmutype: optional override string ("gpummu", "iommu", or "nommu");
 *           may be NULL to accept the auto-detected default
 *
 * Default selection: NONE on apq8064, otherwise GPU MMU, upgraded to
 * IOMMU when the platform reports one.  The override string is applied
 * last ("iommu" is honored only if an IOMMU is actually present).
 */
void kgsl_mmu_set_mmutype(char *mmutype)
{
	/* Set the default MMU - GPU on <=8960 and nothing on >= 8064 */
	kgsl_mmu_type =
		cpu_is_apq8064() ? KGSL_MMU_TYPE_NONE : KGSL_MMU_TYPE_GPU;

	/* Use the IOMMU if it is found */
	if (iommu_found())
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;

	if (mmutype && !strncmp(mmutype, "gpummu", 6))
		kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
	if (iommu_found() && mmutype && !strncmp(mmutype, "iommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
	if (mmutype && !strncmp(mmutype, "nommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
}
EXPORT_SYMBOL(kgsl_mmu_set_mmutype);