blob: e7f59357e1c0840d01302e8df81fb934c810e6d9 [file] [log] [blame]
Jordan Crouse00714012012-03-16 14:53:40 -06001/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
Steve Mucklef132c6c2012-06-06 18:30:57 -070013#include <linux/export.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070014#include <linux/types.h>
15#include <linux/device.h>
16#include <linux/spinlock.h>
17#include <linux/genalloc.h>
18#include <linux/slab.h>
19#include <linux/sched.h>
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060020#include <linux/iommu.h>
Jordan Crouse817e0b92012-02-04 10:23:53 -070021#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022
23#include "kgsl.h"
24#include "kgsl_mmu.h"
25#include "kgsl_device.h"
26#include "kgsl_sharedmem.h"
27
/* GPU virtual allocations from the general pool are aligned to 8K (1 << 13) */
#define KGSL_MMU_ALIGN_SHIFT 13
#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))

/* MMU backend in use driver-wide; chosen once in kgsl_mmu_set_mmutype() */
static enum kgsl_mmutype kgsl_mmu_type;

static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
34
/*
 * kgsl_cleanup_pt - undo per-device setup for a pagetable.
 * @pt: pagetable being torn down; may be NULL (the NOMMU global-mapping
 *      case) — the filter below short-circuits before dereferencing it.
 *
 * For IOMMU, global structures are mapped only into the global and
 * private-bank pagetables, so per-process IOMMU pagetables are skipped.
 * Always returns 0.
 */
static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
	int i;
	/* For IOMMU only unmap the global structures to global pt */
	if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
		(KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
		(KGSL_MMU_GLOBAL_PT != pt->name) &&
		(KGSL_MMU_PRIV_BANK_TABLE_NAME != pt->name))
		return 0;
	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
	}
	return 0;
}
51
Shubhraprakash Das6b30c9f2012-04-20 01:15:55 -060052
/*
 * kgsl_setup_pt - run per-device setup for a pagetable.
 * @pt: pagetable to set up; may be NULL when called from kgsl_mmu_start()
 *      in NOMMU mode (the type check short-circuits before pt->name).
 *
 * For IOMMU, only the global and private-bank pagetables receive the
 * global mappings; other pagetables return 0 immediately.  On failure,
 * devices already set up (indices 0..i) are cleaned up in reverse.
 */
static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;

	/* For IOMMU only map the global structures to global pt */
	if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
		(KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
		(KGSL_MMU_GLOBAL_PT != pt->name) &&
		(KGSL_MMU_PRIV_BANK_TABLE_NAME != pt->name))
		return 0;
	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	return status;
error_pt:
	/* Roll back every device up to and including the failing index */
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}
82
/*
 * kgsl_destroy_pagetable - kref release callback for a pagetable.
 *
 * Unlinks the pagetable from the driver-global list, removes its sysfs
 * objects, undoes per-device setup, destroys its allocation pools and
 * the backend-private pagetable, then frees the structure.
 */
static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	/* kgsl_pool exists only for IOMMU global/private-bank pagetables */
	if (pagetable->kgsl_pool)
		gen_pool_destroy(pagetable->kgsl_pool);
	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);

	kfree(pagetable);
}
106
107static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
108{
109 if (pagetable)
110 kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
111}
112
/*
 * kgsl_get_pagetable - look up a pagetable by name and take a reference.
 *
 * Returns the matching pagetable with its refcount raised, or NULL if
 * none exists.  A non-NULL return must be balanced with
 * kgsl_put_pagetable().
 */
static struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->name == name) {
			ret = pt;
			/* take the reference while still under ptlock */
			kref_get(&ret->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
	return ret;
}
131
132static struct kgsl_pagetable *
133_get_pt_from_kobj(struct kobject *kobj)
134{
135 unsigned long ptname;
136
137 if (!kobj)
138 return NULL;
139
140 if (sscanf(kobj->name, "%ld", &ptname) != 1)
141 return NULL;
142
143 return kgsl_get_pagetable(ptname);
144}
145
146static ssize_t
147sysfs_show_entries(struct kobject *kobj,
148 struct kobj_attribute *attr,
149 char *buf)
150{
151 struct kgsl_pagetable *pt;
152 int ret = 0;
153
154 pt = _get_pt_from_kobj(kobj);
155
156 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600157 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700158
159 kgsl_put_pagetable(pt);
160 return ret;
161}
162
163static ssize_t
164sysfs_show_mapped(struct kobject *kobj,
165 struct kobj_attribute *attr,
166 char *buf)
167{
168 struct kgsl_pagetable *pt;
169 int ret = 0;
170
171 pt = _get_pt_from_kobj(kobj);
172
173 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600174 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700175
176 kgsl_put_pagetable(pt);
177 return ret;
178}
179
180static ssize_t
181sysfs_show_va_range(struct kobject *kobj,
182 struct kobj_attribute *attr,
183 char *buf)
184{
185 struct kgsl_pagetable *pt;
186 int ret = 0;
187
188 pt = _get_pt_from_kobj(kobj);
189
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600190 if (pt) {
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600191 ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600192 kgsl_mmu_get_ptsize());
193 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700194
195 kgsl_put_pagetable(pt);
196 return ret;
197}
198
199static ssize_t
200sysfs_show_max_mapped(struct kobject *kobj,
201 struct kobj_attribute *attr,
202 char *buf)
203{
204 struct kgsl_pagetable *pt;
205 int ret = 0;
206
207 pt = _get_pt_from_kobj(kobj);
208
209 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600210 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700211
212 kgsl_put_pagetable(pt);
213 return ret;
214}
215
216static ssize_t
217sysfs_show_max_entries(struct kobject *kobj,
218 struct kobj_attribute *attr,
219 char *buf)
220{
221 struct kgsl_pagetable *pt;
222 int ret = 0;
223
224 pt = _get_pt_from_kobj(kobj);
225
226 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600227 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700228
229 kgsl_put_pagetable(pt);
230 return ret;
231}
232
/*
 * Read-only sysfs attributes exported for each pagetable under the
 * per-pagetable kobject created in pagetable_add_sysfs_objects().
 */
static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};
275
276static void
277pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
278{
279 if (pagetable->kobj)
280 sysfs_remove_group(pagetable->kobj,
281 &pagetable_attr_group);
282
283 kobject_put(pagetable->kobj);
284}
285
/*
 * pagetable_add_sysfs_objects - expose per-pagetable stats in sysfs.
 *
 * Creates a kobject named with the decimal pagetable name under
 * kgsl_driver.ptkobj and attaches the stats attribute group.  On any
 * failure the kobject is released and pagetable->kobj is reset to NULL.
 * Returns 0 on success or a negative errno.
 */
static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
			kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}

	return ret;
}
310
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600311unsigned int kgsl_mmu_get_ptsize(void)
312{
313 /*
314 * For IOMMU, we could do up to 4G virtual range if we wanted to, but
315 * it makes more sense to return a smaller range and leave the rest of
316 * the virtual range for future improvements
317 */
318
319 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
320 return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
321 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
322 return SZ_2G;
323 else
324 return 0;
325}
326
/*
 * kgsl_mmu_get_ptname_from_ptbase - map a hardware pt base to a name.
 * @pt_base: pagetable base value as programmed into the MMU.
 *
 * Scans the driver's pagetable list for one whose backend reports a
 * matching base via mmu_pt_equal.  Returns the pagetable name, or -1
 * if no pagetable matches.
 */
int
kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base)
{
	struct kgsl_pagetable *pt;
	int ptid = -1;

	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->pt_ops->mmu_pt_equal(pt, pt_base)) {
			ptid = (int) pt->name;
			break;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ptid;
}
EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600345
/*
 * kgsl_mmu_init - per-device MMU initialization.
 *
 * Allocates and zeroes the page used for setstate operations, then
 * binds the ops table for the configured MMU backend and runs its
 * init hook.  For NOMMU there is no backend init.  Returns 0 or a
 * negative error; the setstate page is freed again on failure.
 */
int kgsl_mmu_init(struct kgsl_device *device)
{
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;
	status = kgsl_allocate_contiguous(&mmu->setstate_memory, PAGE_SIZE);
	if (status)
		return status;
	kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
				mmu->setstate_memory.size);

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
		dev_info(device->dev, "|%s| MMU type set for device is "
				"NOMMU\n", __func__);
		goto done;
	} else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		mmu->mmu_ops = &gpummu_ops;
	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		mmu->mmu_ops = &iommu_ops;

	status = mmu->mmu_ops->mmu_init(mmu);
done:
	if (status)
		kgsl_sharedmem_free(&mmu->setstate_memory);
	return status;
}
EXPORT_SYMBOL(kgsl_mmu_init);
374
/*
 * kgsl_mmu_start - start the MMU for a device.
 *
 * NOMMU: disable the hardware MMU and, on the first start (no gpuaddr
 * assigned yet), set up the global mappings with a NULL pagetable.
 * Otherwise defer to the backend's start hook.
 */
int kgsl_mmu_start(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		kgsl_regwrite(device, MH_MMU_CONFIG, 0);
		/* Setup gpuaddr of global mappings */
		if (!mmu->setstate_memory.gpuaddr)
			kgsl_setup_pt(NULL);
		return 0;
	} else {
		return mmu->mmu_ops->mmu_start(mmu);
	}
}
EXPORT_SYMBOL(kgsl_mmu_start);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600390
/*
 * mh_axi_error - log details of an MH AXI read/write error.
 * @type: "read" or "write", used only in the log message.
 *
 * NOTE(review): the values 44/45 written to MH_DEBUG_CTRL select the
 * debug-data words holding the faulting GPU and physical addresses —
 * presumably per the MH debug bus documentation; confirm against the
 * hardware spec.
 */
static void mh_axi_error(struct kgsl_device *device, const char* type)
{
	unsigned int reg, gpu_err, phys_err, pt_base;

	kgsl_regread(device, MH_AXI_ERROR, &reg);
	pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
	/*
	 * Read gpu virtual and physical addresses that
	 * caused the error from the debug data.
	 */
	kgsl_regwrite(device, MH_DEBUG_CTRL, 44);
	kgsl_regread(device, MH_DEBUG_DATA, &gpu_err);
	kgsl_regwrite(device, MH_DEBUG_CTRL, 45);
	kgsl_regread(device, MH_DEBUG_DATA, &phys_err);
	KGSL_MEM_CRIT(device,
		"axi %s error: %08x pt %08x gpu %08x phys %08x\n",
		type, reg, pt_base, gpu_err, phys_err);
}
409
/*
 * kgsl_mh_intrcallback - memory-hub interrupt handler.
 *
 * Reads the MH interrupt status, logs AXI read/write errors,
 * dispatches MMU page faults to the backend, then acknowledges the
 * handled bits (masked to KGSL_MMU_INT_MASK).
 */
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		mh_axi_error(device, "read");
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		mh_axi_error(device, "write");
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		device->mmu.mmu_ops->mmu_pagefault(&device->mmu);

	status &= KGSL_MMU_INT_MASK;
	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);
427
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700428static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
429 unsigned int name)
430{
431 int status = 0;
432 struct kgsl_pagetable *pagetable = NULL;
433 unsigned long flags;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600434 unsigned int ptsize;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700435
436 pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
437 if (pagetable == NULL) {
438 KGSL_CORE_ERR("kzalloc(%d) failed\n",
439 sizeof(struct kgsl_pagetable));
440 return NULL;
441 }
442
443 kref_init(&pagetable->refcount);
444
445 spin_lock_init(&pagetable->lock);
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600446
447 ptsize = kgsl_mmu_get_ptsize();
448
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700449 pagetable->name = name;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600450 pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700451
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600452 /*
453 * create a separate kgsl pool for IOMMU, global mappings can be mapped
454 * just once from this pool of the defaultpagetable
455 */
456 if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
Shubhraprakash Das19ca4a62012-05-18 12:11:20 -0600457 ((KGSL_MMU_GLOBAL_PT == name) ||
458 (KGSL_MMU_PRIV_BANK_TABLE_NAME == name))) {
459 pagetable->kgsl_pool = gen_pool_create(PAGE_SHIFT, -1);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600460 if (pagetable->kgsl_pool == NULL) {
461 KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
Jeremy Gebbenc589ccb2012-05-16 10:26:20 -0600462 KGSL_MMU_ALIGN_SHIFT);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600463 goto err_alloc;
464 }
465 if (gen_pool_add(pagetable->kgsl_pool,
466 KGSL_IOMMU_GLOBAL_MEM_BASE,
467 KGSL_IOMMU_GLOBAL_MEM_SIZE, -1)) {
468 KGSL_CORE_ERR("gen_pool_add failed\n");
469 goto err_kgsl_pool;
470 }
471 }
472
Jeremy Gebbenc589ccb2012-05-16 10:26:20 -0600473 pagetable->pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT, -1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700474 if (pagetable->pool == NULL) {
Jeremy Gebbenc589ccb2012-05-16 10:26:20 -0600475 KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
476 KGSL_MMU_ALIGN_SHIFT);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600477 goto err_kgsl_pool;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700478 }
479
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600480 if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600481 ptsize, -1)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700482 KGSL_CORE_ERR("gen_pool_add failed\n");
483 goto err_pool;
484 }
485
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600486 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
487 pagetable->pt_ops = &gpummu_pt_ops;
488 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
489 pagetable->pt_ops = &iommu_pt_ops;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700490
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600491 pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
492 if (!pagetable->priv)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700493 goto err_pool;
494
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700495 status = kgsl_setup_pt(pagetable);
496 if (status)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600497 goto err_mmu_create;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700498
499 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
500 list_add(&pagetable->list, &kgsl_driver.pagetable_list);
501 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
502
503 /* Create the sysfs entries */
504 pagetable_add_sysfs_objects(pagetable);
505
506 return pagetable;
507
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600508err_mmu_create:
509 pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700510err_pool:
511 gen_pool_destroy(pagetable->pool);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600512err_kgsl_pool:
513 if (pagetable->kgsl_pool)
514 gen_pool_destroy(pagetable->kgsl_pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700515err_alloc:
516 kfree(pagetable);
517
518 return NULL;
519}
520
/*
 * kgsl_mmu_getpagetable - get (or lazily create) a pagetable by name.
 *
 * Returns the sentinel (void *)(-1) when the MMU is disabled.  When
 * per-process pagetables are not configured, every request collapses
 * to the global pagetable.  Returns a referenced pagetable or NULL.
 */
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return (void *)(-1);

#ifndef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
	name = KGSL_MMU_GLOBAL_PT;
#endif
	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}
538
/* Public wrapper: release a pagetable reference (NULL is a no-op). */
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700544
/*
 * kgsl_setstate - apply an MMU state change for a context.
 * @mmu: the device MMU.
 * @context_id: context the state change applies to.
 * @flags: KGSL_MMUFLAGS_* bits describing the operation.
 *
 * No-op when the MMU is disabled.  The device-level setstate hook, if
 * present, takes precedence over the backend's device setstate.
 */
void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
			uint32_t flags)
{
	struct kgsl_device *device = mmu->device;
	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return;
	else if (device->ftbl->setstate)
		device->ftbl->setstate(device, context_id, flags);
	else if (mmu->mmu_ops->mmu_device_setstate)
		mmu->mmu_ops->mmu_device_setstate(mmu, flags);
}
EXPORT_SYMBOL(kgsl_setstate);
557
/*
 * kgsl_mh_start - program the memory hub (MH) registers for a device.
 *
 * Disables the hardware MMU, idles the GPU, then programs the MPU
 * physical range, arbiter config and the optional client interface
 * configs from the per-device struct kgsl_mh values.
 */
void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force the MMU off for now */
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	kgsl_idle(device);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
			mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
			mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
			mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}
584
Jeremy Gebben1b9b1f142012-05-16 10:43:28 -0600585static inline struct gen_pool *
586_get_pool(struct kgsl_pagetable *pagetable, unsigned int flags)
587{
588 if (pagetable->kgsl_pool &&
589 (KGSL_MEMFLAGS_GLOBAL & flags))
590 return pagetable->kgsl_pool;
591 return pagetable->pool;
592}
593
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700594int
595kgsl_mmu_map(struct kgsl_pagetable *pagetable,
596 struct kgsl_memdesc *memdesc,
597 unsigned int protflags)
598{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600599 int ret;
Jeremy Gebben1b9b1f142012-05-16 10:43:28 -0600600 struct gen_pool *pool;
Jordan Crouse3c86ca82012-05-21 08:41:52 -0600601 int size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700602
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600603 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
Jordan Crouse40861a42012-02-06 10:18:23 -0700604 if (memdesc->sglen == 1) {
Shubhraprakash Das4d6af2b2012-04-20 00:35:03 -0600605 memdesc->gpuaddr = sg_dma_address(memdesc->sg);
606 if (!memdesc->gpuaddr)
607 memdesc->gpuaddr = sg_phys(memdesc->sg);
608 if (!memdesc->gpuaddr) {
609 KGSL_CORE_ERR("Unable to get a valid physical "
610 "address for memdesc\n");
611 return -EINVAL;
612 }
Jordan Crouse40861a42012-02-06 10:18:23 -0700613 return 0;
614 } else {
615 KGSL_CORE_ERR("Memory is not contigious "
616 "(sglen = %d)\n", memdesc->sglen);
617 return -EINVAL;
618 }
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600619 }
Jordan Crouse40861a42012-02-06 10:18:23 -0700620
Jordan Crouse3c86ca82012-05-21 08:41:52 -0600621 size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
622
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600623 /* Allocate from kgsl pool if it exists for global mappings */
Jeremy Gebben1b9b1f142012-05-16 10:43:28 -0600624 pool = _get_pool(pagetable, memdesc->priv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700625
Jordan Crouse3c86ca82012-05-21 08:41:52 -0600626 memdesc->gpuaddr = gen_pool_alloc(pool, size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700627 if (memdesc->gpuaddr == 0) {
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600628 KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
Jordan Crouse3c86ca82012-05-21 08:41:52 -0600629 size,
Jeremy Gebben1b9b1f142012-05-16 10:43:28 -0600630 (pool == pagetable->kgsl_pool) ?
631 "kgsl_pool" : "general_pool");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700632 KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
633 pagetable->name, pagetable->stats.mapped,
634 pagetable->stats.entries);
635 return -ENOMEM;
636 }
637
Shubhraprakash Dasbadaeda2012-03-21 00:31:39 -0600638 if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
639 spin_lock(&pagetable->lock);
Shubhraprakash Dasf764e462012-04-26 15:38:09 -0600640 ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags,
641 &pagetable->tlb_flags);
Shubhraprakash Dasbadaeda2012-03-21 00:31:39 -0600642 if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
643 spin_lock(&pagetable->lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700644
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600645 if (ret)
646 goto err_free_gpuaddr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700647
648 /* Keep track of the statistics for the sysfs files */
649
650 KGSL_STATS_ADD(1, pagetable->stats.entries,
651 pagetable->stats.max_entries);
652
Jordan Crouse3c86ca82012-05-21 08:41:52 -0600653 KGSL_STATS_ADD(size, pagetable->stats.mapped,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700654 pagetable->stats.max_mapped);
655
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700656 spin_unlock(&pagetable->lock);
657
658 return 0;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600659
660err_free_gpuaddr:
661 spin_unlock(&pagetable->lock);
Jordan Crouse3c86ca82012-05-21 08:41:52 -0600662 gen_pool_free(pool, memdesc->gpuaddr, size);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600663 memdesc->gpuaddr = 0;
664 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700665}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600666EXPORT_SYMBOL(kgsl_mmu_map);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700667
/*
 * kgsl_mmu_unmap - remove a memory descriptor's mapping.
 *
 * No-op for zero-size or never-mapped descriptors.  Updates the stats,
 * returns the GPU virtual range to its pool, and clears gpuaddr except
 * for global mappings (whose address is shared across pagetables).
 * Always returns 0.
 */
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	struct gen_pool *pool;
	int size;

	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
		return 0;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		memdesc->gpuaddr = 0;
		return 0;
	}

	size = kgsl_sg_size(memdesc->sg, memdesc->sglen);

	/*
	 * NOTE(review): lock placement mirrors kgsl_mmu_map() — for
	 * IOMMU the backend unmap runs outside the spinlock; confirm
	 * against the iommu pt_ops before restructuring.
	 */
	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc,
					&pagetable->tlb_flags);
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= size;

	spin_unlock(&pagetable->lock);

	pool = _get_pool(pagetable, memdesc->priv);
	gen_pool_free(pool, memdesc->gpuaddr, size);

	/*
	 * Don't clear the gpuaddr on global mappings because they
	 * may be in use by other pagetables
	 */
	if (!(memdesc->priv & KGSL_MEMFLAGS_GLOBAL))
		memdesc->gpuaddr = 0;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
709
710int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
711 struct kgsl_memdesc *memdesc, unsigned int protflags)
712{
713 int result = -EINVAL;
714 unsigned int gpuaddr = 0;
715
716 if (memdesc == NULL) {
717 KGSL_CORE_ERR("invalid memdesc\n");
718 goto error;
719 }
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600720 /* Not all global mappings are needed for all MMU types */
721 if (!memdesc->size)
722 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700723
724 gpuaddr = memdesc->gpuaddr;
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600725 memdesc->priv |= KGSL_MEMFLAGS_GLOBAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700726
727 result = kgsl_mmu_map(pagetable, memdesc, protflags);
728 if (result)
729 goto error;
730
731 /*global mappings must have the same gpu address in all pagetables*/
732 if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
733 KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x"
734 "gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr,
735 gpuaddr, memdesc->gpuaddr);
736 goto error_unmap;
737 }
738 return result;
739error_unmap:
740 kgsl_mmu_unmap(pagetable, memdesc);
741error:
742 return result;
743}
744EXPORT_SYMBOL(kgsl_mmu_map_global);
745
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700746int kgsl_mmu_close(struct kgsl_device *device)
747{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700748 struct kgsl_mmu *mmu = &device->mmu;
749
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600750 kgsl_sharedmem_free(&mmu->setstate_memory);
751 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600752 return 0;
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600753 else
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600754 return mmu->mmu_ops->mmu_close(mmu);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700755}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600756EXPORT_SYMBOL(kgsl_mmu_close);
757
/*
 * kgsl_mmu_pt_get_flags - consume the pending TLB-flush flag.
 * @pt: pagetable (NULL returns 0).
 * @id: device whose per-device flag bit is tested and cleared.
 *
 * Returns KGSL_MMUFLAGS_TLBFLUSH if a flush was pending for @id,
 * otherwise 0.  Test-and-clear is done under the pagetable lock.
 */
int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
			enum kgsl_deviceid id)
{
	unsigned int result = 0;

	if (pt == NULL)
		return 0;

	spin_lock(&pt->lock);
	if (pt->tlb_flags & (1<<id)) {
		result = KGSL_MMUFLAGS_TLBFLUSH;
		pt->tlb_flags &= ~(1<<id);
	}
	spin_unlock(&pt->lock);
	return result;
}
EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
775
776void kgsl_mmu_ptpool_destroy(void *ptpool)
777{
778 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
779 kgsl_gpummu_ptpool_destroy(ptpool);
780 ptpool = 0;
781}
782EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
783
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600784void *kgsl_mmu_ptpool_init(int entries)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600785{
786 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600787 return kgsl_gpummu_ptpool_init(entries);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600788 else
789 return (void *)(-1);
790}
791EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
792
793int kgsl_mmu_enabled(void)
794{
795 if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
796 return 1;
797 else
798 return 0;
799}
800EXPORT_SYMBOL(kgsl_mmu_enabled);
801
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600802enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
803{
804 return kgsl_mmu_type;
805}
806EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
807
/*
 * kgsl_mmu_set_mmutype - select the driver-wide MMU backend.
 * @mmutype: optional override string ("gpummu", "iommu", "nommu");
 *           NULL accepts the probed default.
 *
 * Default is GPUMMU, except on APQ8064 (no GPU MMU) where it is NONE;
 * a platform-bus IOMMU takes precedence when present.  The override
 * may force any backend, but "iommu" is honored only if an IOMMU is
 * actually present.
 */
void kgsl_mmu_set_mmutype(char *mmutype)
{
	/* Set the default MMU - GPU on <=8960 and nothing on >= 8064 */
	kgsl_mmu_type =
		cpu_is_apq8064() ? KGSL_MMU_TYPE_NONE : KGSL_MMU_TYPE_GPU;

	/* Use the IOMMU if it is found */
	if (iommu_present(&platform_bus_type))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;

	if (mmutype && !strncmp(mmutype, "gpummu", 6))
		kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
	if (iommu_present(&platform_bus_type) && mmutype &&
		!strncmp(mmutype, "iommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
	if (mmutype && !strncmp(mmutype, "nommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
}
EXPORT_SYMBOL(kgsl_mmu_set_mmutype);