/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"

#define KGSL_MMU_ALIGN_SHIFT	13
#define KGSL_MMU_ALIGN_MASK	(~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
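
/*
 * Illustrative note (not from the original source): with
 * KGSL_MMU_ALIGN_SHIFT = 13, GPU allocations are aligned to 8KB
 * boundaries (1 << 13 = 0x2000), so KGSL_MMU_ALIGN_MASK is
 * 0xffffe000 on a 32-bit build.  For example, applying the mask to
 * the address 0x10012345 yields 0x10012000.
 */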

#define GSL_PT_PAGE_BITS_MASK	0x00000007
#define GSL_PT_PAGE_ADDR_MASK	PAGE_MASK

static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);

static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};

static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);

	if (chunk->data == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
		goto err;
	}

	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * sizeof(unsigned long),
				GFP_KERNEL);

	if (chunk->bitmap == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			      BITS_TO_LONGS(count) * sizeof(unsigned long));
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}

static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}

/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add
 *
 * Add static entries to the pagetable pool.
 */

int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;
	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}
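
/*
 * Illustrative example (not part of the original source): with a 4KB
 * ptsize, one chunk can hold at most SZ_4M / 4096 = 1024 pagetables,
 * so kgsl_ptpool_add(pool, 2500) would allocate three chunks of 1024,
 * 1024 and 452 entries respectively.
 */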

/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the pagetable
 *
 * Allocate a pagetable from the pool.  Returns the virtual address
 * of the pagetable; the physical address is returned in physaddr.
 */

void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
	mutex_unlock(&pool->lock);
	return addr;
}
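
/*
 * Minimal usage sketch (illustrative only): allocate a pagetable from
 * the pool and release it again.  The hostptr/physaddr pairing mirrors
 * how kgsl_mmu_createpagetableobject() uses the pool below.
 *
 *	unsigned int physaddr;
 *	void *hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool, &physaddr);
 *
 *	if (hostptr != NULL) {
 *		... program the hardware with physaddr ...
 *		kgsl_ptpool_free(&kgsl_driver.ptpool, hostptr);
 *	}
 */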

static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
				  chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}

/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: The virtual address of the pagetable to free
 *
 * Free a pagetable allocated from the pool.
 */

void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			if (chunk->dynamic &&
			    bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}

void kgsl_ptpool_destroy(struct kgsl_ptpool *pool)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	memset(pool, 0, sizeof(*pool));
}

/**
 * kgsl_ptpool_init
 * @pool: A pointer to a ptpool structure to initialize
 * @ptsize: The size of each pagetable in the pool
 * @entries: The number of initial entries to add to the pool
 *
 * Initialize a pool and allocate an initial chunk of entries.
 */

int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries)
{
	int ret = 0;
	BUG_ON(ptsize == 0);

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			return ret;
	}

	return sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
}
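
/*
 * Illustrative init-time sketch (the entry count of 16 is an
 * assumption, not taken from this file): the driver would typically
 * size the pool once at probe time and tear it down symmetrically.
 *
 *	ret = kgsl_ptpool_init(&kgsl_driver.ptpool, KGSL_PAGETABLE_SIZE,
 *			       16);
 *	...
 *	kgsl_ptpool_destroy(&kgsl_driver.ptpool);
 */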

static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
	int i;
	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
	}
	return 0;
}

static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);

	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;

	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	kfree(pagetable->tlbflushfilter.base);
	kfree(pagetable);
}

static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
	if (pagetable)
		kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
}

static struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->name == name) {
			ret = pt;
			kref_get(&ret->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
	return ret;
}

static struct kgsl_pagetable *
_get_pt_from_kobj(struct kobject *kobj)
{
	unsigned long ptname;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%lu", &ptname) != 1)
		return NULL;

	return kgsl_get_pagetable(ptname);
}

static ssize_t
sysfs_show_entries(struct kobject *kobj,
		   struct kobj_attribute *attr,
		   char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_mapped(struct kobject *kobj,
		  struct kobj_attribute *attr,
		  char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_va_range(struct kobject *kobj,
		    struct kobj_attribute *attr,
		    char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "0x%x\n", pt->va_range);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_mapped(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.max_mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_entries(struct kobject *kobj,
		       struct kobj_attribute *attr,
		       char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.max_entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};

static void
pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	if (pagetable->kobj)
		sysfs_remove_group(pagetable->kobj,
				   &pagetable_attr_group);

	kobject_put(pagetable->kobj);
}

static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
						 kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}

	return ret;
}

static inline uint32_t
kgsl_pt_entry_get(struct kgsl_pagetable *pt, uint32_t va)
{
	return (va - pt->va_base) >> PAGE_SHIFT;
}

static inline void
kgsl_pt_map_set(struct kgsl_pagetable *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;

	writel_relaxed(val, &baseptr[pte]);
}

static inline uint32_t
kgsl_pt_map_getaddr(struct kgsl_pagetable *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	uint32_t ret = readl_relaxed(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
	return ret;
}
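
/*
 * Worked example (illustrative; the base address is an assumption):
 * with va_base = 0x66000000 and 4KB pages, the GPU address 0x66003000
 * maps to PTE index (0x66003000 - 0x66000000) >> 12 = 3.  Each PTE is
 * a 32-bit word holding the page-aligned physical address OR'd with
 * the GSL_PT_PAGE_* protection bits, so kgsl_pt_map_set(pt, 3,
 * physaddr | GSL_PT_PAGE_RV | GSL_PT_PAGE_WV) makes that page
 * readable and writable by the GPU.
 */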

int
kgsl_get_ptname_from_ptbase(unsigned int pt_base)
{
	struct kgsl_pagetable *pt;
	int ptid = -1;

	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt_base == pt->base.gpuaddr) {
			ptid = (int) pt->name;
			break;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ptid;
}

void kgsl_mmu_pagefault(struct kgsl_device *device)
{
	unsigned int reg;
	unsigned int ptbase;

	kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
	kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);

	KGSL_MEM_CRIT(device,
		      "mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
		      reg & ~(PAGE_SIZE - 1),
		      kgsl_get_ptname_from_ptbase(ptbase),
		      reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
}

void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int reg;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
	kgsl_regread(device, MH_AXI_ERROR, &reg);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		kgsl_mmu_pagefault(device);

	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);

static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	return status;
error_pt:
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}

static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
	unsigned int name)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	unsigned long flags;

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			      sizeof(struct kgsl_pagetable));
		return NULL;
	}

	kref_init(&pagetable->refcount);

	spin_lock_init(&pagetable->lock);
	pagetable->tlb_flags = 0;
	pagetable->name = name;
	pagetable->va_base = KGSL_PAGETABLE_BASE;
	pagetable->va_range = CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
	pagetable->last_superpte = 0;
	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(pagetable->va_range);

	pagetable->tlbflushfilter.size = (pagetable->va_range /
		(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	pagetable->tlbflushfilter.base = (unsigned int *)
		kzalloc(pagetable->tlbflushfilter.size, GFP_KERNEL);
	if (!pagetable->tlbflushfilter.base) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			      pagetable->tlbflushfilter.size);
		goto err_alloc;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
		goto err_flushfilter;
	}

	if (gen_pool_add(pagetable->pool, pagetable->va_base,
			 pagetable->va_range, -1)) {
		KGSL_CORE_ERR("gen_pool_add failed\n");
		goto err_pool;
	}

	pagetable->base.hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool,
						    &pagetable->base.physaddr);

	if (pagetable->base.hostptr == NULL)
		goto err_pool;

	/* ptpool allocations are from coherent memory, so update the
	   device statistics accordingly */

	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
		       kgsl_driver.stats.coherent_max);

	pagetable->base.gpuaddr = pagetable->base.physaddr;
	pagetable->base.size = KGSL_PAGETABLE_SIZE;

	status = kgsl_setup_pt(pagetable);
	if (status)
		goto err_free_sharedmem;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	/* Create the sysfs entries */
	pagetable_add_sysfs_objects(pagetable);

	return pagetable;

err_free_sharedmem:
	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);
err_pool:
	gen_pool_destroy(pagetable->pool);
err_flushfilter:
	kfree(pagetable->tlbflushfilter.base);
err_alloc:
	kfree(pagetable);

	return NULL;
}

struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}

void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
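
/*
 * Refcounting sketch (illustrative only): pagetables are shared by
 * name, so every kgsl_mmu_getpagetable() must be balanced by a
 * kgsl_mmu_putpagetable(); the object is destroyed when the last
 * reference is dropped.
 *
 *	struct kgsl_pagetable *pt =
 *		kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
 *	if (pt) {
 *		... map and unmap buffers with kgsl_mmu_map()/unmap() ...
 *		kgsl_mmu_putpagetable(pt);
 *	}
 */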

void kgsl_default_setstate(struct kgsl_device *device, uint32_t flags)
{
	if (!kgsl_mmu_enabled())
		return;

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
		kgsl_regwrite(device, MH_MMU_PT_BASE,
			      device->mmu.hwpagetable->base.gpuaddr);
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		/* Invalidate all and tc */
		kgsl_regwrite(device, MH_MMU_INVALIDATE, 0x00000003);
	}
}
EXPORT_SYMBOL(kgsl_default_setstate);

void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
{
	if (device->ftbl->setstate)
		device->ftbl->setstate(device, flags);
}
EXPORT_SYMBOL(kgsl_setstate);

void kgsl_mmu_setstate(struct kgsl_device *device,
		       struct kgsl_pagetable *pagetable)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* If the page table is not current, set up the MMU to
		 * use the newly specified page table
		 */
		if (mmu->hwpagetable != pagetable) {
			mmu->hwpagetable = pagetable;
			spin_lock(&mmu->hwpagetable->lock);
			mmu->hwpagetable->tlb_flags &= ~(1 << device->id);
			spin_unlock(&mmu->hwpagetable->lock);

			/* call device specific set page table */
			kgsl_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH |
				      KGSL_MMUFLAGS_PTUPDATE);
		}
	}
}
EXPORT_SYMBOL(kgsl_mmu_setstate);

int kgsl_mmu_init(struct kgsl_device *device)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;

	/* make sure aligned to pagesize */
	BUG_ON(device->mh.mpu_base & (PAGE_SIZE - 1));
	BUG_ON((device->mh.mpu_base + device->mh.mpu_range) & (PAGE_SIZE - 1));

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure virtual address range is a multiple of 64KB */
		BUG_ON(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1));

		/* allocate memory used for completing r/w operations that
		 * cannot be mapped by the MMU
		 */
		status = kgsl_allocate_contiguous(&mmu->dummyspace, 64);
		if (!status)
			kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
					   mmu->dummyspace.size);
	}

	return status;
}

void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force mmu off for now */
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
		      mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
			      mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
			      mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}

int kgsl_mmu_start(struct kgsl_device *device)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */

	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;
	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
			   mmu->dummyspace.size);

	/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
	 * to complete transactions in case of an MMU fault.  Note that
	 * we'll leave the bottom 32 bytes of the dummyspace for other
	 * purposes (e.g. use it when dummy read cycles are needed
	 * for other blocks) */
	kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
		      mmu->dummyspace.physaddr + 32);

	if (mmu->defaultpagetable == NULL)
		mmu->defaultpagetable =
			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL)
		return -ENOMEM;

	mmu->hwpagetable = mmu->defaultpagetable;

	kgsl_regwrite(device, MH_MMU_PT_BASE,
		      mmu->hwpagetable->base.gpuaddr);
	kgsl_regwrite(device, MH_MMU_VA_RANGE,
		      (mmu->hwpagetable->va_base |
		       (mmu->hwpagetable->va_range >> 16)));
	kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
	mmu->flags |= KGSL_FLAGS_STARTED;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_start);

unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
{
	unsigned int physaddr = 0;
	pgd_t *pgd_ptr = NULL;
	pmd_t *pmd_ptr = NULL;
	pte_t *pte_ptr = NULL, pte;

	pgd_ptr = pgd_offset(current->mm, (unsigned long) virtaddr);
	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
		KGSL_CORE_ERR("Invalid pgd entry\n");
		return 0;
	}

	pmd_ptr = pmd_offset(pgd_ptr, (unsigned long) virtaddr);
	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
		KGSL_CORE_ERR("Invalid pmd entry\n");
		return 0;
	}

	pte_ptr = pte_offset_map(pmd_ptr, (unsigned long) virtaddr);
	if (!pte_ptr) {
		KGSL_CORE_ERR("pte_offset_map failed\n");
		return 0;
	}
	pte = *pte_ptr;
	physaddr = pte_pfn(pte);
	pte_unmap(pte_ptr);
	physaddr <<= PAGE_SHIFT;
	return physaddr;
}
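
/*
 * Illustrative note (not from the original source): the walk above
 * resolves a userspace virtual address to the page-aligned physical
 * address of its backing page.  For example, if virtaddr falls in a
 * page whose pte_pfn() is 0x40123, the function returns
 * 0x40123 << PAGE_SHIFT = 0x40123000 (assuming 4KB pages); the
 * sub-page offset of virtaddr is deliberately not added back.
 */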

int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
	     struct kgsl_memdesc *memdesc,
	     unsigned int protflags)
{
	int numpages;
	unsigned int pte, ptefirst, ptelast, physaddr;
	int flushtlb;
	unsigned int offset = 0;

	BUG_ON(protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV));
	BUG_ON(protflags == 0);

	memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
		memdesc->size, KGSL_MMU_ALIGN_SHIFT);

	if (memdesc->gpuaddr == 0) {
		KGSL_CORE_ERR("gen_pool_alloc(%d) failed\n", memdesc->size);
		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
			      pagetable->name, pagetable->stats.mapped,
			      pagetable->stats.entries);
		return -ENOMEM;
	}

	numpages = (memdesc->size >> PAGE_SHIFT);

	ptefirst = kgsl_pt_entry_get(pagetable, memdesc->gpuaddr);
	ptelast = ptefirst + numpages;

	pte = ptefirst;
	flushtlb = 0;

	/* tlb needs to be flushed when the first and last pte are not at
	 * superpte boundaries */
	if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
	    ((ptelast + 1) & (GSL_PT_SUPER_PTE - 1)) != 0)
		flushtlb = 1;

	spin_lock(&pagetable->lock);
	for (pte = ptefirst; pte < ptelast; pte++, offset += PAGE_SIZE) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		uint32_t val = kgsl_pt_map_getaddr(pagetable, pte);
		BUG_ON(val != 0 && val != GSL_PT_PAGE_DIRTY);
#endif
		if ((pte & (GSL_PT_SUPER_PTE - 1)) == 0)
			if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
				flushtlb = 1;
		/* mark pte as in use */

		physaddr = memdesc->ops->physaddr(memdesc, offset);
		BUG_ON(physaddr == 0);
		kgsl_pt_map_set(pagetable, pte, physaddr | protflags);
	}

	/* Keep track of the statistics for the sysfs files */

	KGSL_STATS_ADD(1, pagetable->stats.entries,
		       pagetable->stats.max_entries);

	KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
		       pagetable->stats.max_mapped);

	/* Post all writes to the pagetable */
	wmb();

	/* Invalidate tlb only if current page table used by GPU is the
	 * pagetable that we used to allocate */
	if (flushtlb) {
		/* set all devices as needing flushing */
		pagetable->tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}
	spin_unlock(&pagetable->lock);

	return 0;
}
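
/*
 * Usage sketch (illustrative; the memdesc setup is an assumption, not
 * taken from this file): map a buffer GPU-readable and GPU-writable,
 * then tear the mapping down again.
 *
 *	struct kgsl_memdesc desc = { .size = SZ_8K, .ops = &my_ops };
 *
 *	if (kgsl_mmu_map(pt, &desc, GSL_PT_PAGE_RV | GSL_PT_PAGE_WV) == 0) {
 *		... desc.gpuaddr is now valid for the GPU ...
 *		kgsl_mmu_unmap(pt, &desc);
 *	}
 */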

int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
	       struct kgsl_memdesc *memdesc)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	unsigned int range = memdesc->size;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	numpages = (range >> PAGE_SHIFT);
	if (range & (PAGE_SIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(pagetable, gpuaddr);
	ptelast = ptefirst + numpages;

	spin_lock(&pagetable->lock);
	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE - 1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		BUG_ON(!kgsl_pt_map_getaddr(pagetable, pte));
#endif
		kgsl_pt_map_set(pagetable, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
				GSL_PT_SUPER_PTE);
	}

	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= range;

	/* Post all writes to the pagetable */
	wmb();

	spin_unlock(&pagetable->lock);

	gen_pool_free(pagetable->pool, gpuaddr, range);

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);

int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
			struct kgsl_memdesc *memdesc, unsigned int protflags)
{
	int result = -EINVAL;
	unsigned int gpuaddr = 0;

	if (memdesc == NULL) {
		KGSL_CORE_ERR("invalid memdesc\n");
		goto error;
	}

	gpuaddr = memdesc->gpuaddr;

	result = kgsl_mmu_map(pagetable, memdesc, protflags);
	if (result)
		goto error;

	/* global mappings must have the same gpu address in all pagetables */
	if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
		KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x "
			      "gpu 0x%08x 0x%08x", pagetable, memdesc->physaddr,
			      gpuaddr, memdesc->gpuaddr);
		goto error_unmap;
	}
	return result;
error_unmap:
	kgsl_mmu_unmap(pagetable, memdesc);
error:
	return result;
}
EXPORT_SYMBOL(kgsl_mmu_map_global);

int kgsl_mmu_stop(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
	mmu->flags &= ~KGSL_FLAGS_STARTED;

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_stop);

int kgsl_mmu_close(struct kgsl_device *device)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->dummyspace.gpuaddr)
		kgsl_sharedmem_free(&mmu->dummyspace);

	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}