/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"

#define KGSL_MMU_ALIGN_SHIFT	13
#define KGSL_MMU_ALIGN_MASK	(~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))

#define GSL_PT_PAGE_BITS_MASK	0x00000007
#define GSL_PT_PAGE_ADDR_MASK	PAGE_MASK

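/*
 * GPU virtual ranges handed out by kgsl_mmu_map() are aligned to
 * 1 << KGSL_MMU_ALIGN_SHIFT bytes (8K). Within each 32-bit PTE the
 * bits under GSL_PT_PAGE_BITS_MASK carry the access/dirty flags and
 * the bits under GSL_PT_PAGE_ADDR_MASK carry the physical page address.
 */
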
static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);

static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", kgsl_driver.ptpool.entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			kgsl_driver.ptpool.static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", kgsl_driver.ptpool.chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", kgsl_driver.ptpool.ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};

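/*
 * _kgsl_ptpool_add_entries - grow the pool by one chunk of pagetables
 * @pool: pool to grow
 * @count: number of pagetable entries the new chunk should hold
 * @dynamic: nonzero if the chunk may be freed again once it is empty
 *
 * Allocate one coherent DMA chunk large enough for @count pagetables
 * plus a bitmap that tracks which entries are in use. Caller must
 * hold pool->lock.
 */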
static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);

	if (chunk->data == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
		goto err;
	}

	/* one bit per entry; the bitmap is an array of unsigned longs */
	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * sizeof(unsigned long),
				GFP_KERNEL);

	if (chunk->bitmap == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			BITS_TO_LONGS(count) * sizeof(unsigned long));
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}

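/*
 * Scan each chunk's bitmap for a free slot. Returns the kernel virtual
 * address of the first free pagetable and writes its physical address
 * to *physaddr, or returns NULL if every chunk is full. Caller must
 * hold pool->lock.
 */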
static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}

/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add
 *
 * Add static entries to the pagetable pool.
 */

int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;
	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}

/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the pagetable
 *
 * Allocate a pagetable from the pool. Returns the virtual address
 * of the pagetable; the physical address is returned in physaddr.
 */

void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
	mutex_unlock(&pool->lock);
	return addr;
}

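/*
 * Unlink a chunk from the pool and release its coherent memory and
 * bitmap. Caller must hold pool->lock.
 */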
static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
			chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}

/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: The virtual address to free
 *
 * Free a pagetable allocated from the pool
 */

void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			if (chunk->dynamic &&
				bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}

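/**
 * kgsl_ptpool_destroy
 * @pool: A pointer to a ptpool structure
 *
 * Remove all remaining chunks from the pool and zero the structure.
 */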
void kgsl_ptpool_destroy(struct kgsl_ptpool *pool)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	memset(pool, 0, sizeof(*pool));
}

/**
 * kgsl_ptpool_init
 * @pool: A pointer to a ptpool structure to initialize
 * @ptsize: The size of each pagetable in the pool
 * @entries: The number of initial entries to add to the pool
 *
 * Initialize a pool and allocate an initial chunk of entries.
 */

int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries)
{
	int ret = 0;
	BUG_ON(ptsize == 0);

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			return ret;
	}

	return sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
}

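/*
 * Typical pool lifecycle, for reference (the initial entry count of 16
 * below is purely illustrative):
 *
 *	kgsl_ptpool_init(&kgsl_driver.ptpool, KGSL_PAGETABLE_SIZE, 16);
 *	hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool, &physaddr);
 *	...
 *	kgsl_ptpool_free(&kgsl_driver.ptpool, hostptr);
 *	kgsl_ptpool_destroy(&kgsl_driver.ptpool);
 */
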
/* Give each registered device a chance to tear down its per-pagetable
 * state before the pagetable itself is destroyed. */
static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
	int i;
	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
	}
	return 0;
}

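/*
 * kref release callback: unlink the pagetable from the global list,
 * remove its sysfs objects and per-device state, then release its
 * pagetable memory, address pool and TLB flush filter.
 */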
static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);

	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;

	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	kfree(pagetable->tlbflushfilter.base);
	kfree(pagetable);
}

static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
	if (pagetable)
		kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
}

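/*
 * Look up a pagetable by name and take a reference on it. Returns
 * NULL if no pagetable with that name exists.
 */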
static struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->name == name) {
			ret = pt;
			kref_get(&ret->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
	return ret;
}

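/*
 * The per-pagetable sysfs kobject is named after the pagetable's
 * numeric name (see pagetable_add_sysfs_objects()), so parse the
 * kobject name back into a pagetable reference.
 */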
static struct kgsl_pagetable *
_get_pt_from_kobj(struct kobject *kobj)
{
	unsigned long ptname;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%lu", &ptname) != 1)
		return NULL;

	return kgsl_get_pagetable(ptname);
}

static ssize_t
sysfs_show_entries(struct kobject *kobj,
		   struct kobj_attribute *attr,
		   char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_mapped(struct kobject *kobj,
		  struct kobj_attribute *attr,
		  char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_va_range(struct kobject *kobj,
		    struct kobj_attribute *attr,
		    char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "0x%x\n", pt->va_range);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_mapped(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_entries(struct kobject *kobj,
		       struct kobj_attribute *attr,
		       char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};

static void
pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	if (pagetable->kobj)
		sysfs_remove_group(pagetable->kobj,
			&pagetable_attr_group);

	kobject_put(pagetable->kobj);
}

static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
		kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}

	return ret;
}

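/*
 * Each page in a pagetable's VA range is described by one 32-bit PTE.
 * The helpers below convert a GPU virtual address into a PTE index and
 * read or write individual entries in the pagetable memory.
 */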
static inline uint32_t
kgsl_pt_entry_get(struct kgsl_pagetable *pt, uint32_t va)
{
	return (va - pt->va_base) >> PAGE_SHIFT;
}

static inline void
kgsl_pt_map_set(struct kgsl_pagetable *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;

	writel_relaxed(val, &baseptr[pte]);
}

static inline uint32_t
kgsl_pt_map_getaddr(struct kgsl_pagetable *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	uint32_t ret = readl_relaxed(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
	return ret;
}

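/*
 * Translate a hardware pagetable base address back into the owning
 * pagetable's name for fault reporting. Returns -1 if no pagetable
 * matches.
 */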
int
kgsl_get_ptname_from_ptbase(unsigned int pt_base)
{
	struct kgsl_pagetable *pt;
	int ptid = -1;

	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt_base == pt->base.gpuaddr) {
			ptid = (int) pt->name;
			break;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ptid;
}

void kgsl_mmu_pagefault(struct kgsl_device *device)
{
	unsigned int reg;
	unsigned int ptbase;

	kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
	kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);

	KGSL_MEM_CRIT(device,
		"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
		reg & ~(PAGE_SIZE - 1),
		kgsl_get_ptname_from_ptbase(ptbase),
		reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
}

void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int reg;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
	kgsl_regread(device, MH_AXI_ERROR, &reg);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		kgsl_mmu_pagefault(device);

	status &= KGSL_MMU_INT_MASK;
	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);

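/*
 * Run the device-specific setup_pt hook on every registered device;
 * on failure, unwind with cleanup_pt.
 */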
static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	return status;
error_pt:
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}

static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
	unsigned int name)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	unsigned long flags;

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			sizeof(struct kgsl_pagetable));
		return NULL;
	}

	kref_init(&pagetable->refcount);

	spin_lock_init(&pagetable->lock);
	pagetable->tlb_flags = 0;
	pagetable->name = name;
	pagetable->va_base = KGSL_PAGETABLE_BASE;
	pagetable->va_range = CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
	pagetable->last_superpte = 0;
	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(pagetable->va_range);

	pagetable->tlbflushfilter.size = (pagetable->va_range /
		(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	pagetable->tlbflushfilter.base = (unsigned int *)
		kzalloc(pagetable->tlbflushfilter.size, GFP_KERNEL);
	if (!pagetable->tlbflushfilter.base) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			pagetable->tlbflushfilter.size);
		goto err_alloc;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
		goto err_flushfilter;
	}

	if (gen_pool_add(pagetable->pool, pagetable->va_base,
		pagetable->va_range, -1)) {
		KGSL_CORE_ERR("gen_pool_add failed\n");
		goto err_pool;
	}

	pagetable->base.hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool,
		&pagetable->base.physaddr);

	if (pagetable->base.hostptr == NULL)
		goto err_pool;

	/* ptpool allocations are from coherent memory, so update the
	   device statistics accordingly */

	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
		       kgsl_driver.stats.coherent_max);

	pagetable->base.gpuaddr = pagetable->base.physaddr;
	pagetable->base.size = KGSL_PAGETABLE_SIZE;

	status = kgsl_setup_pt(pagetable);
	if (status)
		goto err_free_sharedmem;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	/* Create the sysfs entries */
	pagetable_add_sysfs_objects(pagetable);

	return pagetable;

err_free_sharedmem:
	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);
err_pool:
	gen_pool_destroy(pagetable->pool);
err_flushfilter:
	kfree(pagetable->tlbflushfilter.base);
err_alloc:
	kfree(pagetable);

	return NULL;
}

struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}

void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}

void kgsl_default_setstate(struct kgsl_device *device, uint32_t flags)
{
	if (!kgsl_mmu_enabled())
		return;

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
		kgsl_regwrite(device, MH_MMU_PT_BASE,
			device->mmu.hwpagetable->base.gpuaddr);
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		/* Invalidate all and tc */
		kgsl_regwrite(device, MH_MMU_INVALIDATE, 0x00000003);
	}
}
EXPORT_SYMBOL(kgsl_default_setstate);

void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
{
	if (device->ftbl->setstate)
		device->ftbl->setstate(device, flags);
}
EXPORT_SYMBOL(kgsl_setstate);

void kgsl_mmu_setstate(struct kgsl_device *device,
			struct kgsl_pagetable *pagetable)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* if the specified page table is not current, set up
		 * the mmu to use it
		 */
		if (mmu->hwpagetable != pagetable) {
			mmu->hwpagetable = pagetable;
			spin_lock(&mmu->hwpagetable->lock);
			mmu->hwpagetable->tlb_flags &= ~(1<<device->id);
			spin_unlock(&mmu->hwpagetable->lock);

			/* call device specific set page table */
			kgsl_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH |
				KGSL_MMUFLAGS_PTUPDATE);
		}
	}
}
EXPORT_SYMBOL(kgsl_mmu_setstate);

int kgsl_mmu_init(struct kgsl_device *device)
{
	/*
	 * initialize the device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;

	/* make sure aligned to pagesize */
	BUG_ON(device->mh.mpu_base & (PAGE_SIZE - 1));
	BUG_ON((device->mh.mpu_base + device->mh.mpu_range) & (PAGE_SIZE - 1));

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure virtual address range is a multiple of 64KB */
		BUG_ON(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1));

		/* allocate memory used for completing r/w operations that
		 * cannot be mapped by the MMU
		 */
		status = kgsl_allocate_contiguous(&mmu->dummyspace, 64);
	}

	return status;
}

void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force mmu off for now */
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
			mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
				mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
				mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}

int kgsl_mmu_start(struct kgsl_device *device)
{
	/*
	 * start the device mmu
	 *
	 * call this with the global lock held
	 */

	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
			   mmu->dummyspace.size);

	/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
	 * to complete transactions in case of an MMU fault. Note that
	 * we'll leave the bottom 32 bytes of the dummyspace for other
	 * purposes (e.g. use it when dummy read cycles are needed
	 * for other blocks) */
	kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
		mmu->dummyspace.physaddr + 32);

	if (mmu->defaultpagetable == NULL)
		mmu->defaultpagetable =
			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL)
		return -ENOMEM;

	mmu->hwpagetable = mmu->defaultpagetable;

	kgsl_regwrite(device, MH_MMU_PT_BASE,
		      mmu->hwpagetable->base.gpuaddr);
	kgsl_regwrite(device, MH_MMU_VA_RANGE,
		      (mmu->hwpagetable->va_base |
		      (mmu->hwpagetable->va_range >> 16)));
	kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
	mmu->flags |= KGSL_FLAGS_STARTED;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_start);

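/*
 * Walk the current process' page tables to translate a virtual address
 * into a physical address. Returns 0 if no valid mapping exists.
 */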
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
{
	unsigned int physaddr = 0;
	pgd_t *pgd_ptr = NULL;
	pmd_t *pmd_ptr = NULL;
	pte_t *pte_ptr = NULL, pte;

	pgd_ptr = pgd_offset(current->mm, (unsigned long) virtaddr);
	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
		KGSL_CORE_ERR("Invalid pgd entry\n");
		return 0;
	}

	pmd_ptr = pmd_offset(pgd_ptr, (unsigned long) virtaddr);
	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
		KGSL_CORE_ERR("Invalid pmd entry\n");
		return 0;
	}

	pte_ptr = pte_offset_map(pmd_ptr, (unsigned long) virtaddr);
	if (!pte_ptr) {
		KGSL_CORE_ERR("pte_offset_map failed\n");
		return 0;
	}
	pte = *pte_ptr;
	physaddr = pte_pfn(pte);
	pte_unmap(pte_ptr);
	physaddr <<= PAGE_SHIFT;
	return physaddr;
}

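/*
 * kgsl_mmu_map - map a memory descriptor into a pagetable: carve a GPU
 * virtual range out of the pagetable's pool, then write one PTE per
 * page with the requested protection flags. The TLB is marked for
 * flushing whenever the new mapping could overlap stale entries.
 */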
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
				struct kgsl_memdesc *memdesc,
				unsigned int protflags)
{
	int numpages;
	unsigned int pte, ptefirst, ptelast, physaddr;
	int flushtlb;
	unsigned int offset = 0;

	BUG_ON(protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV));
	BUG_ON(protflags == 0);

	memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
		memdesc->size, KGSL_MMU_ALIGN_SHIFT);

	if (memdesc->gpuaddr == 0) {
		KGSL_CORE_ERR("gen_pool_alloc_aligned(%d) failed\n",
			memdesc->size);
		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
			pagetable->name, pagetable->stats.mapped,
			pagetable->stats.entries);
		return -ENOMEM;
	}

	numpages = (memdesc->size >> PAGE_SHIFT);

	ptefirst = kgsl_pt_entry_get(pagetable, memdesc->gpuaddr);
	ptelast = ptefirst + numpages;

	pte = ptefirst;
	flushtlb = 0;

	/* tlb needs to be flushed when the first and last pte are not at
	 * superpte boundaries */
	if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
	    ((ptelast + 1) & (GSL_PT_SUPER_PTE - 1)) != 0)
		flushtlb = 1;

	spin_lock(&pagetable->lock);
	for (pte = ptefirst; pte < ptelast; pte++, offset += PAGE_SIZE) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		uint32_t val = kgsl_pt_map_getaddr(pagetable, pte);
		BUG_ON(val != 0 && val != GSL_PT_PAGE_DIRTY);
#endif
		if ((pte & (GSL_PT_SUPER_PTE - 1)) == 0)
			if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
				flushtlb = 1;
		/* mark pte as in use */

		physaddr = memdesc->ops->physaddr(memdesc, offset);
		BUG_ON(physaddr == 0);
		kgsl_pt_map_set(pagetable, pte, physaddr | protflags);
	}

	/* Keep track of the statistics for the sysfs files */

	KGSL_STATS_ADD(1, pagetable->stats.entries,
		       pagetable->stats.max_entries);

	KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
		       pagetable->stats.max_mapped);

	/* Post all writes to the pagetable */
	wmb();

	/* Invalidate tlb only if current page table used by GPU is the
	 * pagetable that we used to allocate */
	if (flushtlb) {
		/* set all devices as needing flushing */
		pagetable->tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}
	spin_unlock(&pagetable->lock);

	return 0;
}

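/*
 * kgsl_mmu_unmap - release a mapping made by kgsl_mmu_map: mark each
 * PTE dirty, record the affected superpte ranges in the TLB flush
 * filter, and return the GPU virtual range to the pagetable's pool.
 */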
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	unsigned int range = memdesc->size;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	numpages = (range >> PAGE_SHIFT);
	if (range & (PAGE_SIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(pagetable, gpuaddr);
	ptelast = ptefirst + numpages;

	spin_lock(&pagetable->lock);
	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE - 1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		BUG_ON(!kgsl_pt_map_getaddr(pagetable, pte));
#endif
		kgsl_pt_map_set(pagetable, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
				GSL_PT_SUPER_PTE);
	}

	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= range;

	/* Post all writes to the pagetable */
	wmb();

	spin_unlock(&pagetable->lock);

	gen_pool_free(pagetable->pool, gpuaddr, range);

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);

int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
			struct kgsl_memdesc *memdesc, unsigned int protflags)
{
	int result = -EINVAL;
	unsigned int gpuaddr = 0;

	if (memdesc == NULL) {
		KGSL_CORE_ERR("invalid memdesc\n");
		goto error;
	}

	gpuaddr = memdesc->gpuaddr;

	result = kgsl_mmu_map(pagetable, memdesc, protflags);
	if (result)
		goto error;

	/* global mappings must have the same gpu address in all pagetables */
	if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
		KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x "
			"gpu 0x%08x 0x%08x", pagetable, memdesc->physaddr,
			gpuaddr, memdesc->gpuaddr);
		goto error_unmap;
	}
	return result;
error_unmap:
	kgsl_mmu_unmap(pagetable, memdesc);
error:
	return result;
}
EXPORT_SYMBOL(kgsl_mmu_map_global);

int kgsl_mmu_stop(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
	mmu->flags &= ~KGSL_FLAGS_STARTED;

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_stop);

int kgsl_mmu_close(struct kgsl_device *device)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->dummyspace.gpuaddr)
		kgsl_sharedmem_free(&mmu->dummyspace);

	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}