blob: dbb88ee847c87b2f3ffcc6a45415b06ce303b6bd [file] [log] [blame]
Jordan Crouse00714012012-03-16 14:53:40 -06001/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
Steve Mucklef132c6c2012-06-06 18:30:57 -070013#include <linux/export.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070014#include <linux/types.h>
15#include <linux/device.h>
16#include <linux/spinlock.h>
17#include <linux/genalloc.h>
18#include <linux/slab.h>
19#include <linux/sched.h>
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060020#include <linux/iommu.h>
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -070021#include <mach/iommu.h>
Jordan Crouse817e0b92012-02-04 10:23:53 -070022#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070023
24#include "kgsl.h"
25#include "kgsl_mmu.h"
26#include "kgsl_device.h"
27#include "kgsl_sharedmem.h"
28
29#define KGSL_MMU_ALIGN_SHIFT 13
30#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
31
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060032static enum kgsl_mmutype kgsl_mmu_type;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070033
34static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
35
/*
 * kgsl_cleanup_pt - run each registered device's cleanup_pt hook for @pt.
 *
 * For IOMMU only unmap the global structures to global pt: any other
 * (per-process) IOMMU pagetable is skipped entirely.
 *
 * NOTE(review): keep the KGSL_MMU_TYPE_NONE term first. The mirror
 * function kgsl_setup_pt() is called with pt == NULL in the no-MMU path
 * (see kgsl_mmu_start), and the short-circuit is what prevents the
 * pt->name dereference; the same guard is kept here for symmetry.
 *
 * Always returns 0.
 */
static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
	int i;
	/* For IOMMU only unmap the global structures to global pt */
	if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
		(KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
		(KGSL_MMU_GLOBAL_PT != pt->name) &&
		(KGSL_MMU_PRIV_BANK_TABLE_NAME != pt->name))
		return 0;
	/* Give every registered device a chance to tear down its state */
	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
	}
	return 0;
}
52
Shubhraprakash Das6b30c9f2012-04-20 01:15:55 -060053
/*
 * kgsl_setup_pt - run each registered device's setup_pt hook for @pt,
 * unwinding already-initialized devices on failure.
 *
 * @pt may be NULL: kgsl_mmu_start() calls kgsl_setup_pt(NULL) when the
 * MMU type is NONE. The KGSL_MMU_TYPE_NONE term below must stay first
 * so the condition short-circuits before pt->name is dereferenced.
 *
 * Returns 0 on success or the first device's error code.
 */
static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;

	/* For IOMMU only map the global structures to global pt */
	if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
		(KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
		(KGSL_MMU_GLOBAL_PT != pt->name) &&
		(KGSL_MMU_PRIV_BANK_TABLE_NAME != pt->name))
		return 0;
	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	return status;
error_pt:
	/*
	 * Unwind from the failing index back to 0; the failing device is
	 * included so it can release any partial setup it performed.
	 */
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}
83
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070084static void kgsl_destroy_pagetable(struct kref *kref)
85{
86 struct kgsl_pagetable *pagetable = container_of(kref,
87 struct kgsl_pagetable, refcount);
88 unsigned long flags;
89
90 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
91 list_del(&pagetable->list);
92 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
93
94 pagetable_remove_sysfs_objects(pagetable);
95
96 kgsl_cleanup_pt(pagetable);
97
Shubhraprakash Das84fdb112012-04-04 12:49:31 -060098 if (pagetable->kgsl_pool)
99 gen_pool_destroy(pagetable->kgsl_pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700100 if (pagetable->pool)
101 gen_pool_destroy(pagetable->pool);
102
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600103 pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
104
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700105 kfree(pagetable);
106}
107
108static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
109{
110 if (pagetable)
111 kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
112}
113
114static struct kgsl_pagetable *
115kgsl_get_pagetable(unsigned long name)
116{
117 struct kgsl_pagetable *pt, *ret = NULL;
118 unsigned long flags;
119
120 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
121 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
122 if (pt->name == name) {
123 ret = pt;
124 kref_get(&ret->refcount);
125 break;
126 }
127 }
128
129 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
130 return ret;
131}
132
133static struct kgsl_pagetable *
134_get_pt_from_kobj(struct kobject *kobj)
135{
136 unsigned long ptname;
137
138 if (!kobj)
139 return NULL;
140
141 if (sscanf(kobj->name, "%ld", &ptname) != 1)
142 return NULL;
143
144 return kgsl_get_pagetable(ptname);
145}
146
147static ssize_t
148sysfs_show_entries(struct kobject *kobj,
149 struct kobj_attribute *attr,
150 char *buf)
151{
152 struct kgsl_pagetable *pt;
153 int ret = 0;
154
155 pt = _get_pt_from_kobj(kobj);
156
157 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600158 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700159
160 kgsl_put_pagetable(pt);
161 return ret;
162}
163
164static ssize_t
165sysfs_show_mapped(struct kobject *kobj,
166 struct kobj_attribute *attr,
167 char *buf)
168{
169 struct kgsl_pagetable *pt;
170 int ret = 0;
171
172 pt = _get_pt_from_kobj(kobj);
173
174 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600175 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176
177 kgsl_put_pagetable(pt);
178 return ret;
179}
180
181static ssize_t
182sysfs_show_va_range(struct kobject *kobj,
183 struct kobj_attribute *attr,
184 char *buf)
185{
186 struct kgsl_pagetable *pt;
187 int ret = 0;
188
189 pt = _get_pt_from_kobj(kobj);
190
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600191 if (pt) {
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600192 ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600193 kgsl_mmu_get_ptsize());
194 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700195
196 kgsl_put_pagetable(pt);
197 return ret;
198}
199
200static ssize_t
201sysfs_show_max_mapped(struct kobject *kobj,
202 struct kobj_attribute *attr,
203 char *buf)
204{
205 struct kgsl_pagetable *pt;
206 int ret = 0;
207
208 pt = _get_pt_from_kobj(kobj);
209
210 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600211 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700212
213 kgsl_put_pagetable(pt);
214 return ret;
215}
216
217static ssize_t
218sysfs_show_max_entries(struct kobject *kobj,
219 struct kobj_attribute *attr,
220 char *buf)
221{
222 struct kgsl_pagetable *pt;
223 int ret = 0;
224
225 pt = _get_pt_from_kobj(kobj);
226
227 if (pt)
Jeremy Gebbena87bb862011-08-08 16:09:38 -0600228 ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700229
230 kgsl_put_pagetable(pt);
231 return ret;
232}
233
/* Read-only (0444) sysfs attributes exposing per-pagetable statistics */
static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

/* NULL-terminated array consumed by sysfs_create_group() */
static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};
276
277static void
278pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
279{
280 if (pagetable->kobj)
281 sysfs_remove_group(pagetable->kobj,
282 &pagetable_attr_group);
283
284 kobject_put(pagetable->kobj);
285}
286
287static int
288pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
289{
290 char ptname[16];
291 int ret = -ENOMEM;
292
293 snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
294 pagetable->kobj = kobject_create_and_add(ptname,
295 kgsl_driver.ptkobj);
296 if (pagetable->kobj == NULL)
297 goto err;
298
299 ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);
300
301err:
302 if (ret) {
303 if (pagetable->kobj)
304 kobject_put(pagetable->kobj);
305
306 pagetable->kobj = NULL;
307 }
308
309 return ret;
310}
311
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600312unsigned int kgsl_mmu_get_ptsize(void)
313{
314 /*
315 * For IOMMU, we could do up to 4G virtual range if we wanted to, but
316 * it makes more sense to return a smaller range and leave the rest of
317 * the virtual range for future improvements
318 */
319
320 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
321 return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
322 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
Shubhraprakash Das337d6c92012-09-19 16:19:19 -0700323 return SZ_2G - KGSL_PAGETABLE_BASE;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600324 else
325 return 0;
326}
327
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600328int
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700329kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu, unsigned int pt_base)
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600330{
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600331 struct kgsl_pagetable *pt;
332 int ptid = -1;
333
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700334 if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal)
335 return KGSL_MMU_GLOBAL_PT;
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600336 spin_lock(&kgsl_driver.ptlock);
337 list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700338 if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600339 ptid = (int) pt->name;
340 break;
341 }
342 }
343 spin_unlock(&kgsl_driver.ptlock);
344
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600345 return ptid;
346}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600347EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
Sushmita Susheelendra354d9712011-07-28 17:16:49 -0600348
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600349int kgsl_mmu_init(struct kgsl_device *device)
350{
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600351 int status = 0;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600352 struct kgsl_mmu *mmu = &device->mmu;
353
354 mmu->device = device;
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600355 status = kgsl_allocate_contiguous(&mmu->setstate_memory, PAGE_SIZE);
356 if (status)
357 return status;
358 kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
359 mmu->setstate_memory.size);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600360
361 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600362 dev_info(device->dev, "|%s| MMU type set for device is "
Shubhraprakash Dasf5526a12012-04-20 00:48:33 -0600363 "NOMMU\n", __func__);
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600364 goto done;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600365 } else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
366 mmu->mmu_ops = &gpummu_ops;
367 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
368 mmu->mmu_ops = &iommu_ops;
369
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600370 status = mmu->mmu_ops->mmu_init(mmu);
371done:
372 if (status)
373 kgsl_sharedmem_free(&mmu->setstate_memory);
374 return status;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600375}
376EXPORT_SYMBOL(kgsl_mmu_init);
377
378int kgsl_mmu_start(struct kgsl_device *device)
379{
380 struct kgsl_mmu *mmu = &device->mmu;
381
382 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
383 kgsl_regwrite(device, MH_MMU_CONFIG, 0);
Shubhraprakash Das6b30c9f2012-04-20 01:15:55 -0600384 /* Setup gpuaddr of global mappings */
385 if (!mmu->setstate_memory.gpuaddr)
386 kgsl_setup_pt(NULL);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600387 return 0;
388 } else {
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600389 return mmu->mmu_ops->mmu_start(mmu);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600390 }
391}
392EXPORT_SYMBOL(kgsl_mmu_start);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600393
/*
 * mh_axi_error - log details of an MH AXI read/write error interrupt.
 * @type: "read" or "write", used only in the log message.
 *
 * Reads the AXI error status register and the current pagetable base,
 * then pulls the faulting GPU virtual and physical addresses out of the
 * MH debug-data register and logs everything at CRIT level.
 */
static void mh_axi_error(struct kgsl_device *device, const char* type)
{
	unsigned int reg, gpu_err, phys_err, pt_base;

	kgsl_regread(device, MH_AXI_ERROR, &reg);
	pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
	/*
	 * Read gpu virtual and physical addresses that
	 * caused the error from the debug data.
	 * NOTE(review): 44 and 45 are hardware debug-data selector
	 * indices - presumably defined by the MH block spec; confirm
	 * against the hardware documentation before changing.
	 */
	kgsl_regwrite(device, MH_DEBUG_CTRL, 44);
	kgsl_regread(device, MH_DEBUG_DATA, &gpu_err);
	kgsl_regwrite(device, MH_DEBUG_CTRL, 45);
	kgsl_regread(device, MH_DEBUG_DATA, &phys_err);
	KGSL_MEM_CRIT(device,
			"axi %s error: %08x pt %08x gpu %08x phys %08x\n",
			type, reg, pt_base, gpu_err, phys_err);
}
412
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700413void kgsl_mh_intrcallback(struct kgsl_device *device)
414{
415 unsigned int status = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700416
417 kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700418
419 if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
Jeremy Gebbenf4ea0822012-04-05 16:27:08 -0600420 mh_axi_error(device, "read");
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600421 if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
Jeremy Gebbenf4ea0822012-04-05 16:27:08 -0600422 mh_axi_error(device, "write");
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600423 if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600424 device->mmu.mmu_ops->mmu_pagefault(&device->mmu);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700425
Jordan Crousec8c9fcd2011-07-28 08:37:58 -0600426 status &= KGSL_MMU_INT_MASK;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700427 kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700428}
429EXPORT_SYMBOL(kgsl_mh_intrcallback);
430
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700431static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
432 unsigned int name)
433{
434 int status = 0;
435 struct kgsl_pagetable *pagetable = NULL;
436 unsigned long flags;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600437 unsigned int ptsize;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700438
439 pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
440 if (pagetable == NULL) {
441 KGSL_CORE_ERR("kzalloc(%d) failed\n",
442 sizeof(struct kgsl_pagetable));
443 return NULL;
444 }
445
446 kref_init(&pagetable->refcount);
447
448 spin_lock_init(&pagetable->lock);
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600449
450 ptsize = kgsl_mmu_get_ptsize();
451
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700452 pagetable->name = name;
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600453 pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700454
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600455 /*
456 * create a separate kgsl pool for IOMMU, global mappings can be mapped
457 * just once from this pool of the defaultpagetable
458 */
459 if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
Shubhraprakash Das19ca4a62012-05-18 12:11:20 -0600460 ((KGSL_MMU_GLOBAL_PT == name) ||
461 (KGSL_MMU_PRIV_BANK_TABLE_NAME == name))) {
462 pagetable->kgsl_pool = gen_pool_create(PAGE_SHIFT, -1);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600463 if (pagetable->kgsl_pool == NULL) {
464 KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
Jeremy Gebbenc589ccb2012-05-16 10:26:20 -0600465 KGSL_MMU_ALIGN_SHIFT);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600466 goto err_alloc;
467 }
468 if (gen_pool_add(pagetable->kgsl_pool,
469 KGSL_IOMMU_GLOBAL_MEM_BASE,
470 KGSL_IOMMU_GLOBAL_MEM_SIZE, -1)) {
471 KGSL_CORE_ERR("gen_pool_add failed\n");
472 goto err_kgsl_pool;
473 }
474 }
475
Jeremy Gebbenc589ccb2012-05-16 10:26:20 -0600476 pagetable->pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT, -1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700477 if (pagetable->pool == NULL) {
Jeremy Gebbenc589ccb2012-05-16 10:26:20 -0600478 KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
479 KGSL_MMU_ALIGN_SHIFT);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600480 goto err_kgsl_pool;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700481 }
482
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600483 if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600484 ptsize, -1)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700485 KGSL_CORE_ERR("gen_pool_add failed\n");
486 goto err_pool;
487 }
488
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600489 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
490 pagetable->pt_ops = &gpummu_pt_ops;
491 else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
492 pagetable->pt_ops = &iommu_pt_ops;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700493
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600494 pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
495 if (!pagetable->priv)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700496 goto err_pool;
497
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700498 status = kgsl_setup_pt(pagetable);
499 if (status)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600500 goto err_mmu_create;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700501
502 spin_lock_irqsave(&kgsl_driver.ptlock, flags);
503 list_add(&pagetable->list, &kgsl_driver.pagetable_list);
504 spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
505
506 /* Create the sysfs entries */
507 pagetable_add_sysfs_objects(pagetable);
508
509 return pagetable;
510
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600511err_mmu_create:
512 pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700513err_pool:
514 gen_pool_destroy(pagetable->pool);
Shubhraprakash Das84fdb112012-04-04 12:49:31 -0600515err_kgsl_pool:
516 if (pagetable->kgsl_pool)
517 gen_pool_destroy(pagetable->kgsl_pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700518err_alloc:
519 kfree(pagetable);
520
521 return NULL;
522}
523
/*
 * kgsl_mmu_getpagetable - get (creating on demand) the pagetable @name.
 *
 * Returns a referenced pagetable (drop with kgsl_mmu_putpagetable()),
 * the sentinel (void *)(-1) when no MMU is in use, or NULL if creation
 * failed.
 */
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return (void *)(-1);

#ifndef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
	/* Per-process pagetables disabled: everyone shares the global PT */
	name = KGSL_MMU_GLOBAL_PT;
#endif
	/* We presently do not support per-process for IOMMU-v2 */
	if (!msm_soc_version_supports_iommu_v1())
		name = KGSL_MMU_GLOBAL_PT;

	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}
545
/*
 * kgsl_mmu_putpagetable - release a reference obtained from
 * kgsl_mmu_getpagetable(); the pagetable is destroyed when the last
 * reference drops. NULL is tolerated.
 */
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700551
Shubhraprakash Dasb2abc452012-06-08 16:33:03 -0600552void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
553 uint32_t flags)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700554{
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600555 struct kgsl_device *device = mmu->device;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600556 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
557 return;
Shubhraprakash Dasc6e21012012-05-11 17:24:51 -0600558 else if (device->ftbl->setstate)
Shubhraprakash Dasb2abc452012-06-08 16:33:03 -0600559 device->ftbl->setstate(device, context_id, flags);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600560 else if (mmu->mmu_ops->mmu_device_setstate)
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600561 mmu->mmu_ops->mmu_device_setstate(mmu, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700562}
563EXPORT_SYMBOL(kgsl_setstate);
564
/*
 * kgsl_mh_start - program the memory-hub (MH) registers for a device:
 * disable the MMU, wait for idle, then set the MPU-accessible physical
 * range, arbiter configuration and optional client interface configs.
 */
void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force mmu off for now; it is enabled later by the MMU start path */
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	kgsl_idle(device);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
			mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	/* Client interface configs are optional; 0 means "leave default" */
	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
				mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
				mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}
591
Jeremy Gebben1b9b1f142012-05-16 10:43:28 -0600592static inline struct gen_pool *
593_get_pool(struct kgsl_pagetable *pagetable, unsigned int flags)
594{
595 if (pagetable->kgsl_pool &&
596 (KGSL_MEMFLAGS_GLOBAL & flags))
597 return pagetable->kgsl_pool;
598 return pagetable->pool;
599}
600
/*
 * kgsl_mmu_map - allocate a GPU virtual address for @memdesc and map its
 * scatterlist into @pagetable with @protflags.
 *
 * In the no-MMU case the "GPU address" is simply the single contiguous
 * physical address of the buffer. Otherwise a VA is carved from the
 * appropriate gen_pool and handed to the MMU implementation's map hook.
 *
 * Returns 0 on success or a negative errno; on success memdesc->gpuaddr
 * holds the mapped address.
 */
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
				struct kgsl_memdesc *memdesc,
				unsigned int protflags)
{
	int ret;
	struct gen_pool *pool;
	int size;
	/* default alignment: one page */
	int page_align = ilog2(PAGE_SIZE);

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		/* Without an MMU only physically contiguous buffers work */
		if (memdesc->sglen == 1) {
			memdesc->gpuaddr = sg_dma_address(memdesc->sg);
			if (!memdesc->gpuaddr)
				memdesc->gpuaddr = sg_phys(memdesc->sg);
			if (!memdesc->gpuaddr) {
				KGSL_CORE_ERR("Unable to get a valid physical "
					"address for memdesc\n");
				return -EINVAL;
			}
			return 0;
		} else {
			KGSL_CORE_ERR("Memory is not contigious "
				"(sglen = %d)\n", memdesc->sglen);
			return -EINVAL;
		}
	}

	size = kgsl_sg_size(memdesc->sg, memdesc->sglen);

	/* Allocate from kgsl pool if it exists for global mappings */
	pool = _get_pool(pagetable, memdesc->priv);

	/* Allocate aligned virtual addresses for iommu. This allows
	 * more efficient pagetable entries if the physical memory
	 * is also aligned. Don't do this for GPUMMU, because
	 * the address space is so small.
	 */
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype() &&
	    (memdesc->priv & KGSL_MEMALIGN_MASK)) {
		page_align = (memdesc->priv & KGSL_MEMALIGN_MASK)
			>> KGSL_MEMALIGN_SHIFT;
	}
	memdesc->gpuaddr = gen_pool_alloc_aligned(pool, size, page_align);
	if (memdesc->gpuaddr == 0) {
		KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
			size,
			(pool == pagetable->kgsl_pool) ?
			"kgsl_pool" : "general_pool");
		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
			pagetable->name, pagetable->stats.mapped,
			pagetable->stats.entries);
		return -ENOMEM;
	}

	/*
	 * NOTE(review): the locking here is deliberately asymmetric.
	 * For non-IOMMU the pagetable lock is held ACROSS the map call;
	 * for IOMMU it is taken only AFTER the map call (the IOMMU map
	 * path apparently must run unlocked - confirm before changing).
	 * Either way the lock is held when the stats below are updated.
	 */
	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags,
						&pagetable->tlb_flags);
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);

	if (ret)
		goto err_free_gpuaddr;

	/* Keep track of the statistics for the sysfs files */

	KGSL_STATS_ADD(1, pagetable->stats.entries,
		       pagetable->stats.max_entries);

	KGSL_STATS_ADD(size, pagetable->stats.mapped,
		       pagetable->stats.max_mapped);

	spin_unlock(&pagetable->lock);

	return 0;

err_free_gpuaddr:
	spin_unlock(&pagetable->lock);
	/* return the VA to its pool on map failure */
	gen_pool_free(pool, memdesc->gpuaddr, size);
	memdesc->gpuaddr = 0;
	return ret;
}
EXPORT_SYMBOL(kgsl_mmu_map);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700685
/*
 * kgsl_mmu_unmap - undo kgsl_mmu_map(): remove the mapping from the
 * pagetable, update statistics and return the virtual range to its pool.
 *
 * Always returns 0; unmapping an already-unmapped or empty memdesc is a
 * no-op.
 */
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	struct gen_pool *pool;
	int size;

	/* Nothing mapped - nothing to do */
	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
		return 0;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		/* No MMU: the "mapping" was just the physical address */
		memdesc->gpuaddr = 0;
		return 0;
	}

	size = kgsl_sg_size(memdesc->sg, memdesc->sglen);

	/*
	 * NOTE(review): same asymmetric locking as kgsl_mmu_map() - the
	 * lock is held across the unmap call for non-IOMMU, but taken only
	 * afterwards for IOMMU; in both cases it protects the stats update.
	 */
	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc,
					&pagetable->tlb_flags);
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= size;

	spin_unlock(&pagetable->lock);

	pool = _get_pool(pagetable, memdesc->priv);
	gen_pool_free(pool, memdesc->gpuaddr, size);

	/*
	 * Don't clear the gpuaddr on global mappings because they
	 * may be in use by other pagetables
	 */
	if (!(memdesc->priv & KGSL_MEMFLAGS_GLOBAL))
		memdesc->gpuaddr = 0;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
727
728int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
729 struct kgsl_memdesc *memdesc, unsigned int protflags)
730{
731 int result = -EINVAL;
732 unsigned int gpuaddr = 0;
733
734 if (memdesc == NULL) {
735 KGSL_CORE_ERR("invalid memdesc\n");
736 goto error;
737 }
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600738 /* Not all global mappings are needed for all MMU types */
739 if (!memdesc->size)
740 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700741
742 gpuaddr = memdesc->gpuaddr;
Jeremy Gebben3e626ff2012-09-24 13:05:39 -0600743 memdesc->priv |= KGSL_MEMFLAGS_GLOBAL
744 | (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700745
746 result = kgsl_mmu_map(pagetable, memdesc, protflags);
747 if (result)
748 goto error;
749
750 /*global mappings must have the same gpu address in all pagetables*/
751 if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
752 KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x"
753 "gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr,
754 gpuaddr, memdesc->gpuaddr);
755 goto error_unmap;
756 }
757 return result;
758error_unmap:
759 kgsl_mmu_unmap(pagetable, memdesc);
760error:
761 return result;
762}
763EXPORT_SYMBOL(kgsl_mmu_map_global);
764
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700765int kgsl_mmu_close(struct kgsl_device *device)
766{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700767 struct kgsl_mmu *mmu = &device->mmu;
768
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600769 kgsl_sharedmem_free(&mmu->setstate_memory);
770 if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600771 return 0;
Shubhraprakash Das0ff034f2012-05-02 15:51:07 -0600772 else
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600773 return mmu->mmu_ops->mmu_close(mmu);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700774}
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600775EXPORT_SYMBOL(kgsl_mmu_close);
776
777int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
778 enum kgsl_deviceid id)
779{
Shubhraprakash Dasf764e462012-04-26 15:38:09 -0600780 unsigned int result = 0;
781
782 if (pt == NULL)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600783 return 0;
Shubhraprakash Dasf764e462012-04-26 15:38:09 -0600784
785 spin_lock(&pt->lock);
Shubhraprakash Das97828ae2012-06-06 22:46:37 -0600786 if (pt->tlb_flags & (1<<id)) {
Shubhraprakash Dasf764e462012-04-26 15:38:09 -0600787 result = KGSL_MMUFLAGS_TLBFLUSH;
788 pt->tlb_flags &= ~(1<<id);
789 }
790 spin_unlock(&pt->lock);
791 return result;
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600792}
793EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
794
795void kgsl_mmu_ptpool_destroy(void *ptpool)
796{
797 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
798 kgsl_gpummu_ptpool_destroy(ptpool);
799 ptpool = 0;
800}
801EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
802
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600803void *kgsl_mmu_ptpool_init(int entries)
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600804{
805 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600806 return kgsl_gpummu_ptpool_init(entries);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600807 else
808 return (void *)(-1);
809}
810EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
811
812int kgsl_mmu_enabled(void)
813{
814 if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
815 return 1;
816 else
817 return 0;
818}
819EXPORT_SYMBOL(kgsl_mmu_enabled);
820
/* Return the MMU type selected at boot by kgsl_mmu_set_mmutype(). */
enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
{
	return kgsl_mmu_type;
}
EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
826
/*
 * kgsl_mmu_set_mmutype - choose the MMU implementation for this boot.
 * @mmutype: optional override string ("gpummu", "iommu" or "nommu");
 *           may be NULL.
 *
 * Precedence, later steps overriding earlier ones:
 *   chip default -> IOMMU if hardware is present -> explicit override.
 */
void kgsl_mmu_set_mmutype(char *mmutype)
{
	/* Set the default MMU - GPU on <=8960 and nothing on >= 8064 */
	kgsl_mmu_type =
		cpu_is_apq8064() ? KGSL_MMU_TYPE_NONE : KGSL_MMU_TYPE_GPU;

	/* Use the IOMMU if it is found */
	if (iommu_present(&platform_bus_type))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;

	/* An explicit request overrides the probed default; "iommu" is
	 * honored only when IOMMU hardware is actually present. */
	if (mmutype && !strncmp(mmutype, "gpummu", 6))
		kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
	if (iommu_present(&platform_bus_type) && mmutype &&
	    !strncmp(mmutype, "iommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
	if (mmutype && !strncmp(mmutype, "nommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
}
EXPORT_SYMBOL(kgsl_mmu_set_mmutype);
Shubhraprakash Dase7652cf2012-08-11 17:15:19 -0700846
847int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr)
848{
849 if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
850 return 1;
851 return ((gpuaddr >= KGSL_PAGETABLE_BASE) &&
852 (gpuaddr < (KGSL_PAGETABLE_BASE + kgsl_mmu_get_ptsize())));
853}
854EXPORT_SYMBOL(kgsl_mmu_gpuaddr_in_range);
855