/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/bitmap.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"

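/* GPU virtual ranges handed out by kgsl_mmu_map() are aligned to
 * 1 << KGSL_MMU_ALIGN_SHIFT bytes (8 KB) */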
#define KGSL_MMU_ALIGN_SHIFT	13
#define KGSL_MMU_ALIGN_MASK	(~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))

#define GSL_PT_PAGE_BITS_MASK	0x00000007
#define GSL_PT_PAGE_ADDR_MASK	PAGE_MASK

static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);

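/*
 * Read-only sysfs attributes reporting statistics for the global
 * pagetable pool (kgsl_driver.ptpool).  The attribute group is
 * registered on kgsl_driver.ptkobj from kgsl_ptpool_init().
 */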
static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};

static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);

	if (chunk->data == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
		goto err;
	}

	/* One bit per entry; size the bitmap in longs rather than with a
	   hard-coded 4 so it is correct regardless of word size */
	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * sizeof(long),
				GFP_KERNEL);

	if (chunk->bitmap == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			BITS_TO_LONGS(count) * sizeof(long));
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}

static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}

/**
 * kgsl_ptpool_add
 * @pool:  A pointer to a ptpool structure
 * @count: Number of static entries to add
 *
 * Add static entries to the pagetable pool.
 */

int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;
	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}

/**
 * kgsl_ptpool_alloc
 * @pool:     A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the pagetable
 *
 * Allocate a pagetable from the pool.  Returns the virtual address
 * of the pagetable; the physical address is returned in physaddr.
 */

void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
	mutex_unlock(&pool->lock);
	return addr;
}
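
/*
 * Example usage (an illustrative sketch, not an actual caller in this
 * file; kgsl_mmu_createpagetableobject() below follows this pattern):
 *
 *	unsigned int physaddr;
 *	void *hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool, &physaddr);
 *
 *	if (hostptr != NULL) {
 *		... program the MMU with physaddr ...
 *		kgsl_ptpool_free(&kgsl_driver.ptpool, hostptr);
 *	}
 */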

static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
			chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}

/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: The virtual address of the pagetable to free
 *
 * Free a pagetable allocated from the pool.
 */

void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			if (chunk->dynamic &&
			    bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}

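/**
 * kgsl_ptpool_destroy
 * @pool: A pointer to a ptpool structure
 *
 * Free every chunk in the pool and zero the pool structure.
 */
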
void kgsl_ptpool_destroy(struct kgsl_ptpool *pool)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	memset(pool, 0, sizeof(*pool));
}

/**
 * kgsl_ptpool_init
 * @pool:    A pointer to a ptpool structure to initialize
 * @ptsize:  The size (in bytes) of each pagetable in the pool
 * @entries: The number of initial entries to add to the pool
 *
 * Initialize a pool and allocate an initial chunk of entries.
 */

int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries)
{
	int ret = 0;
	BUG_ON(ptsize == 0);

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			return ret;
	}

	return sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
}
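
/*
 * Typical pool lifecycle (a hypothetical driver-init sketch; the real
 * call sites live outside this file, and "initial_entries" is an
 * illustrative name only):
 *
 *	ret = kgsl_ptpool_init(&kgsl_driver.ptpool, KGSL_PAGETABLE_SIZE,
 *			       initial_entries);
 *	...
 *	kgsl_ptpool_destroy(&kgsl_driver.ptpool);
 */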

static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
	int i;
	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
	}
	return 0;
}

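/*
 * kref release callback: invoked via kgsl_put_pagetable() when the last
 * reference to a pagetable is dropped.
 */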
static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);

	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;

	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	kfree(pagetable->tlbflushfilter.base);
	kfree(pagetable);
}

static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
	if (pagetable)
		kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
}

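/*
 * Look up a pagetable by name and take a reference on it.  Returns NULL
 * if no pagetable with that name exists; a successful lookup must be
 * balanced with kgsl_put_pagetable().
 */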
static struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->name == name) {
			ret = pt;
			kref_get(&ret->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
	return ret;
}

static struct kgsl_pagetable *
_get_pt_from_kobj(struct kobject *kobj)
{
	unsigned long ptname;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%lu", &ptname) != 1)
		return NULL;

	return kgsl_get_pagetable(ptname);
}

static ssize_t
sysfs_show_entries(struct kobject *kobj,
		   struct kobj_attribute *attr,
		   char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_mapped(struct kobject *kobj,
		  struct kobj_attribute *attr,
		  char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_va_range(struct kobject *kobj,
		    struct kobj_attribute *attr,
		    char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "0x%x\n", pt->va_range);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_mapped(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.max_mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_entries(struct kobject *kobj,
		       struct kobj_attribute *attr,
		       char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.max_entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};

static void
pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	if (pagetable->kobj)
		sysfs_remove_group(pagetable->kobj,
				   &pagetable_attr_group);

	kobject_put(pagetable->kobj);
}

static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
		kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}

	return ret;
}

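/*
 * Pagetable entry helpers.  Each pagetable is a flat array of 32-bit
 * entries, one per page: the PTE index for a GPU virtual address is
 * (va - va_base) >> PAGE_SHIFT, the upper bits of an entry hold the
 * physical page address (GSL_PT_PAGE_ADDR_MASK), and the low bits hold
 * the access/dirty flags (GSL_PT_PAGE_BITS_MASK).
 */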
static inline uint32_t
kgsl_pt_entry_get(struct kgsl_pagetable *pt, uint32_t va)
{
	return (va - pt->va_base) >> PAGE_SHIFT;
}

static inline void
kgsl_pt_map_set(struct kgsl_pagetable *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;

	writel_relaxed(val, &baseptr[pte]);
}

static inline uint32_t
kgsl_pt_map_getaddr(struct kgsl_pagetable *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	uint32_t ret = readl_relaxed(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
	return ret;
}

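/**
 * kgsl_mh_intrcallback - handle a memory hub (MH) interrupt
 * @device: The device that raised the interrupt
 *
 * Decode and log AXI read/write errors and MMU page faults, then clear
 * the interrupt status.
 */
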
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int reg;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
	kgsl_regread(device, MH_AXI_ERROR, &reg);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
	else if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
	else if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT) {
		unsigned int ptbase;
		struct kgsl_pagetable *pt;
		int ptid = -1;

		kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
		kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);

		spin_lock(&kgsl_driver.ptlock);
		list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
			if (ptbase == pt->base.gpuaddr) {
				ptid = (int) pt->name;
				break;
			}
		}
		spin_unlock(&kgsl_driver.ptlock);

		KGSL_MEM_CRIT(device,
			"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
			reg & ~(PAGE_SIZE - 1), ptid,
			reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
	} else
		KGSL_MEM_WARN(device,
			"bad bits in REG_MH_INTERRUPT_STATUS %08x\n", status);

	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);

	/* TODO: figure out how to handle error interrupts.
	 * Specifically, page faults should probably nuke the client that
	 * caused them, but we don't have enough info to figure that out yet.
	 */
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);

static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	return status;
error_pt:
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}

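/*
 * Allocate and initialize a pagetable object: pagetable memory comes
 * from the global ptpool, GPU virtual addresses are managed by a
 * gen_pool covering [va_base, va_base + va_range), and the TLB flush
 * filter tracks which superpage ranges need invalidation.  Returns
 * NULL on any failure.
 */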
static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
				unsigned int name)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	unsigned long flags;

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			sizeof(struct kgsl_pagetable));
		return NULL;
	}

	kref_init(&pagetable->refcount);

	spin_lock_init(&pagetable->lock);
	pagetable->tlb_flags = 0;
	pagetable->name = name;
	pagetable->va_base = KGSL_PAGETABLE_BASE;
	pagetable->va_range = CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
	pagetable->last_superpte = 0;
	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(pagetable->va_range);

	pagetable->tlbflushfilter.size = (pagetable->va_range /
				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	pagetable->tlbflushfilter.base = (unsigned int *)
			kzalloc(pagetable->tlbflushfilter.size, GFP_KERNEL);
	if (!pagetable->tlbflushfilter.base) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			pagetable->tlbflushfilter.size);
		goto err_alloc;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
		goto err_flushfilter;
	}

	if (gen_pool_add(pagetable->pool, pagetable->va_base,
				pagetable->va_range, -1)) {
		KGSL_CORE_ERR("gen_pool_add failed\n");
		goto err_pool;
	}

	pagetable->base.hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool,
		&pagetable->base.physaddr);

	if (pagetable->base.hostptr == NULL)
		goto err_pool;

	/* ptpool allocations are from coherent memory, so update the
	   device statistics accordingly */

	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
		       kgsl_driver.stats.coherent_max);

	pagetable->base.gpuaddr = pagetable->base.physaddr;
	pagetable->base.size = KGSL_PAGETABLE_SIZE;

	status = kgsl_setup_pt(pagetable);
	if (status)
		goto err_free_sharedmem;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	/* Create the sysfs entries */
	pagetable_add_sysfs_objects(pagetable);

	return pagetable;

err_free_sharedmem:
	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);
	/* back out the coherent memory statistics update from above */
	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;
err_pool:
	gen_pool_destroy(pagetable->pool);
err_flushfilter:
	kfree(pagetable->tlbflushfilter.base);
err_alloc:
	kfree(pagetable);

	return NULL;
}

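/**
 * kgsl_mmu_getpagetable - find or create a pagetable by name
 * @name: The pagetable name (e.g. KGSL_MMU_GLOBAL_PT)
 *
 * Returns the pagetable with a new reference held, creating it on
 * first use.  Release the reference with kgsl_mmu_putpagetable().
 */
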
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}

void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}

void kgsl_default_setstate(struct kgsl_device *device, uint32_t flags)
{
	if (!kgsl_mmu_enabled())
		return;

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
		kgsl_regwrite(device, MH_MMU_PT_BASE,
			device->mmu.hwpagetable->base.gpuaddr);
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		/* Invalidate all entries and the translation cache (tc) */
		kgsl_regwrite(device, MH_MMU_INVALIDATE, 0x00000003);
	}
}
EXPORT_SYMBOL(kgsl_default_setstate);

void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
{
	if (device->ftbl->setstate)
		device->ftbl->setstate(device, flags);
}
EXPORT_SYMBOL(kgsl_setstate);

void kgsl_mmu_setstate(struct kgsl_device *device,
			struct kgsl_pagetable *pagetable)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* If the specified pagetable is not already current,
		 * set up the mmu to use it
		 */
		if (mmu->hwpagetable != pagetable) {
			mmu->hwpagetable = pagetable;
			spin_lock(&mmu->hwpagetable->lock);
			mmu->hwpagetable->tlb_flags &= ~(1<<device->id);
			spin_unlock(&mmu->hwpagetable->lock);

			/* call device specific set page table */
			kgsl_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH |
				KGSL_MMUFLAGS_PTUPDATE);
		}
	}
}
EXPORT_SYMBOL(kgsl_mmu_setstate);

int kgsl_mmu_init(struct kgsl_device *device)
{
	/*
	 * initialize the device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;

	/* make sure aligned to pagesize */
	BUG_ON(mmu->mpu_base & (PAGE_SIZE - 1));
	BUG_ON((mmu->mpu_base + mmu->mpu_range) & (PAGE_SIZE - 1));

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure virtual address range is a multiple of 64KB */
		BUG_ON(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1));

		/* allocate memory used for completing r/w operations that
		 * cannot be mapped by the MMU
		 */
		status = kgsl_allocate_contiguous(&mmu->dummyspace, 64);
		if (!status)
			kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
					   mmu->dummyspace.size);
	}

	return status;
}

int kgsl_mmu_start(struct kgsl_device *device)
{
	/*
	 * start the device mmu
	 *
	 * call this with the global lock held
	 */

	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	/* MMU not enabled */
	if ((mmu->config & 0x1) == 0)
		return 0;

	mmu->flags |= KGSL_FLAGS_STARTED;

	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */

	/* idle device */
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mmu->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
			mmu->mpu_base + mmu->mpu_range);

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {

		kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
				   mmu->dummyspace.size);

		/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
		 * to complete transactions in case of an MMU fault.  Note that
		 * we'll leave the bottom 32 bytes of the dummyspace for other
		 * purposes (e.g. use it when dummy read cycles are needed
		 * for other blocks) */
		kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
				mmu->dummyspace.physaddr + 32);

		if (mmu->defaultpagetable == NULL)
			mmu->defaultpagetable =
				kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

		/* Return error if the default pagetable doesn't exist */
		if (mmu->defaultpagetable == NULL)
			return -ENOMEM;

		mmu->hwpagetable = mmu->defaultpagetable;

		kgsl_regwrite(device, MH_MMU_PT_BASE,
			      mmu->hwpagetable->base.gpuaddr);
		kgsl_regwrite(device, MH_MMU_VA_RANGE,
			      (mmu->hwpagetable->va_base |
			       (mmu->hwpagetable->va_range >> 16)));
		kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_start);

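/**
 * kgsl_virtaddr_to_physaddr - resolve a user virtual address to a
 * physical address
 * @virtaddr: The virtual address to look up in current->mm
 *
 * Walk the current process's page tables (pgd -> pmd -> pte) and
 * return the physical address, or 0 if no valid mapping exists.
 */
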
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
{
	unsigned int physaddr = 0;
	pgd_t *pgd_ptr = NULL;
	pmd_t *pmd_ptr = NULL;
	pte_t *pte_ptr = NULL, pte;

	pgd_ptr = pgd_offset(current->mm, (unsigned long) virtaddr);
	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
		KGSL_CORE_ERR("Invalid pgd entry\n");
		return 0;
	}

	pmd_ptr = pmd_offset(pgd_ptr, (unsigned long) virtaddr);
	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
		KGSL_CORE_ERR("Invalid pmd entry\n");
		return 0;
	}

	pte_ptr = pte_offset_map(pmd_ptr, (unsigned long) virtaddr);
	if (!pte_ptr) {
		KGSL_CORE_ERR("pte_offset_map failed\n");
		return 0;
	}
	pte = *pte_ptr;
	physaddr = pte_pfn(pte);
	pte_unmap(pte_ptr);
	physaddr <<= PAGE_SHIFT;
	return physaddr;
}

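/**
 * kgsl_mmu_map - map a memory descriptor into a pagetable
 * @pagetable: The pagetable to map into
 * @memdesc:   The memory descriptor to map
 * @protflags: Protection flags (GSL_PT_PAGE_RV and/or GSL_PT_PAGE_WV)
 *
 * Allocate a GPU virtual range from the pagetable's pool and write one
 * PTE per page.  A TLB flush is flagged when the mapping does not sit
 * on clean superpage (GSL_PT_SUPER_PTE) boundaries.
 */
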
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
				struct kgsl_memdesc *memdesc,
				unsigned int protflags)
{
	int numpages;
	unsigned int pte, ptefirst, ptelast, physaddr;
	int flushtlb;
	unsigned int offset = 0;

	BUG_ON(protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV));
	BUG_ON(protflags == 0);

	memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
		memdesc->size, KGSL_MMU_ALIGN_SHIFT);

	if (memdesc->gpuaddr == 0) {
		KGSL_CORE_ERR("gen_pool_alloc_aligned(%d) failed\n",
			memdesc->size);
		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
			pagetable->name, pagetable->stats.mapped,
			pagetable->stats.entries);
		return -ENOMEM;
	}

	numpages = (memdesc->size >> PAGE_SHIFT);

	ptefirst = kgsl_pt_entry_get(pagetable, memdesc->gpuaddr);
	ptelast = ptefirst + numpages;

	pte = ptefirst;
	flushtlb = 0;

	/* tlb needs to be flushed when the first and last pte are not at
	 * superpte boundaries */
	if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
	    ((ptelast + 1) & (GSL_PT_SUPER_PTE - 1)) != 0)
		flushtlb = 1;

	spin_lock(&pagetable->lock);
	for (pte = ptefirst; pte < ptelast; pte++, offset += PAGE_SIZE) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		uint32_t val = kgsl_pt_map_getaddr(pagetable, pte);
		BUG_ON(val != 0 && val != GSL_PT_PAGE_DIRTY);
#endif
		if ((pte & (GSL_PT_SUPER_PTE - 1)) == 0)
			if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
				flushtlb = 1;

		/* mark pte as in use */
		physaddr = memdesc->ops->physaddr(memdesc, offset);
		BUG_ON(physaddr == 0);
		kgsl_pt_map_set(pagetable, pte, physaddr | protflags);
	}

	/* Keep track of the statistics for the sysfs files */

	KGSL_STATS_ADD(1, pagetable->stats.entries,
		       pagetable->stats.max_entries);

	KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
		       pagetable->stats.max_mapped);

	/* Post all writes to the pagetable */
	wmb();

	/* If the mapping touched dirty superpte ranges, mark every
	 * device's TLB as needing a flush before this pagetable is
	 * next used */
	if (flushtlb) {
		pagetable->tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}
	spin_unlock(&pagetable->lock);

	return 0;
}

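/**
 * kgsl_mmu_unmap - remove a mapping from a pagetable
 * @pagetable: The pagetable containing the mapping
 * @memdesc:   The memory descriptor to unmap
 *
 * Mark the PTEs for the range as dirty, record the affected superpages
 * in the TLB flush filter, and return the GPU virtual range to the
 * pagetable's pool.
 */
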
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	unsigned int range = memdesc->size;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	numpages = (range >> PAGE_SHIFT);
	if (range & (PAGE_SIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(pagetable, gpuaddr);
	ptelast = ptefirst + numpages;

	spin_lock(&pagetable->lock);
	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE - 1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		BUG_ON(!kgsl_pt_map_getaddr(pagetable, pte));
#endif
		kgsl_pt_map_set(pagetable, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
				GSL_PT_SUPER_PTE);
	}

	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= range;

	/* Post all writes to the pagetable */
	wmb();

	spin_unlock(&pagetable->lock);

	gen_pool_free(pagetable->pool, gpuaddr, range);

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);

int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
			struct kgsl_memdesc *memdesc, unsigned int protflags)
{
	int result = -EINVAL;
	unsigned int gpuaddr = 0;

	if (memdesc == NULL) {
		KGSL_CORE_ERR("invalid memdesc\n");
		goto error;
	}

	gpuaddr = memdesc->gpuaddr;

	result = kgsl_mmu_map(pagetable, memdesc, protflags);
	if (result)
		goto error;

	/* global mappings must have the same gpu address in all pagetables */
	if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
		KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x "
			"gpu 0x%08x 0x%08x", pagetable, memdesc->physaddr,
			gpuaddr, memdesc->gpuaddr);
		goto error_unmap;
	}
	return result;
error_unmap:
	kgsl_mmu_unmap(pagetable, memdesc);
error:
	return result;
}
EXPORT_SYMBOL(kgsl_mmu_map_global);

int kgsl_mmu_stop(struct kgsl_device *device)
{
	/*
	 * stop the device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* disable MMU */
		kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);

		mmu->flags &= ~KGSL_FLAGS_STARTED;
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_stop);

int kgsl_mmu_close(struct kgsl_device *device)
{
	/*
	 * close the device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->dummyspace.gpuaddr)
		kgsl_sharedmem_free(&mmu->dummyspace);

	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}